gen_safe.cpp

  name="google_ads_frame" marginwidth="0" marginheight="0" src="http://pagead2.googlesyndication.com/pagead/ads?client=ca-pub-5572165936844014&dt=1194442938015&lmt=1194190197&format=336x280_as&output=html&correlator=1194442937843&url=file%3A%2F%2F%2FC%3A%2FDocuments%2520and%2520Settings%2Flhh1%2F%E6%A1%8C%E9%9D%A2%2FCLanguage.htm&color_bg=FFFFFF&color_text=000000&color_link=000000&color_url=FFFFFF&color_border=FFFFFF&ad_type=text&ga_vid=583001034.1194442938&ga_sid=1194442938&ga_hid=1942779085&flash=9&u_h=768&u_w=1024&u_ah=740&u_aw=1024&u_cd=32&u_tz=480&u_java=true" frameborder="0" width="336" scrolling="no" height="280" allowtransparency="allowtransparency"> #include <iostream.h>
#include "stdlib.h"

const int SIZE = 10;

// A "safe" array template: operator[] checks every index before use.
template <class AType> class atype {
   AType a[SIZE];
 public:
   atype()
    {
      for(int i=0; i<SIZE; i++)
         a[i] = i;
    }
   AType &operator[](int i);
 };

// Report an out-of-range index, then clamp it to the nearest valid
// element so that no out-of-bounds access takes place.
template <class AType> AType &atype<AType>::operator[](int i)
 {
   if(i<0 || i>SIZE-1)
    {
      cout << endl << "Index value of ";
      cout << i << " is out of bounds." << endl;
      i = (i<0) ? 0 : SIZE-1;
    }
   return a[i];
 }

int main()
 {
   atype<int> int_array;
   atype<double> double_array;
   int i;

   cout << "Integer array: ";
   for(i=0; i<SIZE; i++)
      int_array[i] = i;
   for(i=0; i<SIZE; i++)
      cout << int_array[i] << " ";
   cout << endl;

   cout << "Double array: ";
   cout.precision(2);
   for(i=0; i<SIZE; i++)
      double_array[i] = (double)i/3;
   for(i=0; i<SIZE; i++)
      cout << double_array[i] << " ";
   cout << endl;

   int_array[12] = 100;                 // Out of range: operator[] reports and clamps the index

   return 0;
 }
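
For comparison, here is a minimal modern-C++ sketch of the same idea (my addition, not part of the original example): std::array replaces the raw member array, and an out-of-range index throws std::out_of_range instead of printing a warning. The name checked_array is illustrative.

#include <array>
#include <iostream>
#include <stdexcept>
#include <string>

const int SIZE = 10;

// Hypothetical modern counterpart of atype: bounds errors become exceptions.
template <class AType> class checked_array {
   std::array<AType, SIZE> a{};
 public:
   AType &operator[](int i)
    {
      if(i < 0 || i > SIZE-1)
         throw std::out_of_range("Index value of " + std::to_string(i) +
                                 " is out of bounds.");
      return a[i];
    }
 };

int main()
 {
   checked_array<int> int_array;
   try {
      int_array[12] = 100;              // Throws instead of touching memory out of bounds
   } catch(const std::out_of_range &e) {
      std::cout << e.what() << std::endl;
   }
   return 0;
 }

Either way, the key point of the original example stands: overloading operator[] lets the class intercept every index before the underlying array is touched.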

 

#include "PointCloudProjection.h" #include <pcl/filters/extract_indices.h> #include <pcl/common/transforms.h> #include <pcl/io/pcd_io.h> #include <fstream> #include <stdexcept> #include <cmath> #include <algorithm> #include <iostream> #include "json.hpp" using json = nlohmann::json; // 移除 using namespace Internal; 避免作用域冲突 // 内部类方法实现(保持不变) std::vector<int> Internal::OrientedBBox::get_points_in_box(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud) const { std::vector<int> inside_indices; pcl::PointCloud<pcl::PointXYZ>::Ptr transformed_cloud(new pcl::PointCloud<pcl::PointXYZ>()); pcl::transformPointCloud(*cloud, *transformed_cloud, this->transform); constexpr float n = 2.0f; const float half_x = this->extent[0] / 2.0f * n; const float half_y = this->extent[1] / 2.0f * n; const float half_z = this->extent[2] / 2.0f * n; for (size_t i = 0; i < transformed_cloud->points.size(); ++i) { const auto& pt = transformed_cloud->points[i]; if (std::fabs(pt.x) <= half_x && std::fabs(pt.y) <= half_y && std::fabs(pt.z) <= half_z) { inside_indices.push_back(static_cast<int>(i)); } } std::cout << "------------------------》OBB内筛选到的点数量:" << inside_indices.size() << std::endl; return inside_indices; } std::vector<Eigen::Vector3f> Internal::OrientedBBox::get_corners() const { std::vector<Eigen::Vector3f> corners; const Eigen::Vector3f half_ext = this->extent / 2.0f; std::vector<Eigen::Vector3f> local_corners = { {half_ext.x(), half_ext.y(), half_ext.z()}, {half_ext.x(), half_ext.y(), -half_ext.z()}, {half_ext.x(), -half_ext.y(), half_ext.z()}, {half_ext.x(), -half_ext.y(), -half_ext.z()}, {-half_ext.x(), half_ext.y(), half_ext.z()}, {-half_ext.x(), half_ext.y(), -half_ext.z()}, {-half_ext.x(), -half_ext.y(), half_ext.z()}, {-half_ext.x(), -half_ext.y(), -half_ext.z()} }; const Eigen::Affine3f local_to_global = this->transform.inverse(); for (const auto& lc : local_corners) { corners.push_back(local_to_global * lc); } return corners; } Eigen::Matrix3f Internal::OrientedBBox::get_axes() const { return this->transform.inverse().linear(); } Eigen::Affine3f Internal::OrientedBBox::get_global_to_local() const { return this->transform; } Eigen::Affine3f Internal::OrientedBBox::get_local_to_global() const { return this->transform.inverse(); } // 获取指定面的顶点和中心(全局坐标) bool Internal::OrientedBBox::get_face_info(FaceType face, std::vector<Eigen::Vector3f>& out_vertices, Eigen::Vector3f& out_center) const { out_vertices.reserve(4); Eigen::Vector3f half_ext = extent / 2.0f; Eigen::Affine3f local_to_global = get_local_to_global(); // 局部→全局变换 // 局部坐标系下的面顶点(顺时针顺序) std::vector<Eigen::Vector3f> local_vertices; switch (face) { case FaceType::X_POS: local_vertices = { {half_ext.x(), half_ext.y(), half_ext.z()}, {half_ext.x(), half_ext.y(), -half_ext.z()}, {half_ext.x(), -half_ext.y(), -half_ext.z()}, {half_ext.x(), -half_ext.y(), half_ext.z()} }; out_center = local_to_global * Eigen::Vector3f(half_ext.x(), 0, 0); break; case FaceType::Y_POS: local_vertices = { {half_ext.x(), half_ext.y(), half_ext.z()}, {half_ext.x(), half_ext.y(), -half_ext.z()}, {-half_ext.x(), half_ext.y(), -half_ext.z()}, {-half_ext.x(), half_ext.y(), half_ext.z()} }; out_center = local_to_global * Eigen::Vector3f(0, half_ext.y(), 0); break; case FaceType::Z_POS: local_vertices = { {half_ext.x(), half_ext.y(), half_ext.z()}, {half_ext.x(), -half_ext.y(), half_ext.z()}, {-half_ext.x(), -half_ext.y(), half_ext.z()}, {-half_ext.x(), half_ext.y(), half_ext.z()} }; out_center = local_to_global * Eigen::Vector3f(0, 0, half_ext.z()); break; default: return false; } // 
转换到全局坐标 for (const auto& lv : local_vertices) { out_vertices.push_back(local_to_global * lv); } return true; } // 获取面的局部坐标系(u:面内X轴, v:面内Y轴, n:法向量)- 内联实现 bool Internal::OrientedBBox::get_face_local_axes(FaceType face, Eigen::Vector3f& out_u, Eigen::Vector3f& out_v, Eigen::Vector3f& out_n) const { std::vector<Eigen::Vector3f> vertices; Eigen::Vector3f face_center; if (!get_face_info(face, vertices, face_center)) { return false; } // 计算法向量n(面的外法线,单位向量) Eigen::Vector3f v1 = vertices[1] - vertices[0]; Eigen::Vector3f v2 = vertices[2] - vertices[0]; out_n = v1.cross(v2).normalized(); if (out_n.norm() < 1e-6f) { return false; // 面退化(非平面) } // 计算面内X轴u(从面中心到第一个顶点,投影到面内) Eigen::Vector3f center_to_v0 = vertices[0] - face_center; out_u = center_to_v0 - (center_to_v0.dot(out_n)) * out_n; // 去除法向分量 out_u.normalize(); if (out_u.norm() < 1e-6f) { // 退化处理 out_u = Eigen::Vector3f::UnitX(); if (std::fabs(out_u.dot(out_n)) > 0.9f) { out_u = Eigen::Vector3f::UnitY(); } out_u -= (out_u.dot(out_n)) * out_n; // 确保在面内 out_u.normalize(); } // 计算面内Y轴v(n × u,确保正交) out_v = out_n.cross(out_u).normalized(); return true; } // 绕面的法向量旋转(左旋转为正角度,右旋转为负角度)- 内联实现 Internal::OrientedBBox Internal::OrientedBBox::rotate_around_face_normal(FaceType face, float theta_deg) const { Eigen::Vector3f u, v, n; if (!get_face_local_axes(face, u, v, n)) { throw std::runtime_error("获取面坐标系失败,无法旋转"); } // 角度转换(左旋转+θ,右旋转-θ) float theta = theta_deg * M_PI / 180.0f; // 构建局部→全局转换矩阵M(u, v, n为列向量) Eigen::Matrix3f M; M.col(0) = u; M.col(1) = v; M.col(2) = n; // 局部坐标系下绕Z轴旋转的矩阵(显式转换为旋转矩阵) Eigen::Vector3f z_axis(0.0f, 0.0f, 1.0f); // Z轴向量 Eigen::AngleAxisf angle_axis(theta, z_axis); // 角度轴 Eigen::Matrix3f Rz = angle_axis.toRotationMatrix(); // 显式转换为3x3旋转矩阵 // 全局旋转矩阵 = M * Rz * M^T Eigen::Matrix3f R = M * Rz * M.transpose(); // 计算新的旋转矩阵 Eigen::Matrix3f current_rot = transform.inverse().linear(); Eigen::Matrix3f new_rot = R * current_rot; // 构建新的变换矩阵 Eigen::Affine3f new_local_to_global = Eigen::Affine3f::Identity(); new_local_to_global.rotate(new_rot); new_local_to_global.translate(center); // 新的全局→局部变换矩阵 Eigen::Affine3f new_transform = new_local_to_global.inverse(); // 打印new_transform的内容(旋转矩阵 + 平移向量) std::cout << "===== 新的全局→局部变换矩阵 (new_transform) =====" << std::endl; std::cout << "旋转部分(线性矩阵):" << std::endl << new_transform.linear() << std::endl; std::cout << "平移部分:" << new_transform.translation().transpose() << std::endl << std::endl; return OrientedBBox(center, extent, new_transform.inverse()); } // 内部工具函数实现(显式指定Internal::命名空间) Internal::OrientedBBox Internal::parse_obb(const std::vector<ObbData>& obb_data) { Internal::OrientedBBox obb; for (const auto& data : obb_data) { if (data.obj_type == "负防区") { continue; } const Eigen::Vector3f center(data.pos_x, data.pos_y, data.pos_z); const Eigen::Vector3f dimensions( std::fabs(data.scale_x), std::fabs(data.scale_y), std::fabs(data.scale_z) ); Eigen::Affine3f transform_to_local = Eigen::Affine3f::Identity(); transform_to_local.rotate(Eigen::AngleAxisf(data.rot_x, Eigen::Vector3f::UnitX())); transform_to_local.rotate(Eigen::AngleAxisf(data.rot_y, Eigen::Vector3f::UnitY())); transform_to_local.rotate(Eigen::AngleAxisf(data.rot_z, Eigen::Vector3f::UnitZ())); obb = Internal::OrientedBBox(center, dimensions, transform_to_local); break; } return obb; } cv::Mat Internal::gen_proj_internal( const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& local_cloud, const Internal::OrientedBBox& obb, const Eigen::Vector3f& cam_pos, std::vector<cv::Point>& out_corners, cv::Point& out_center, float cam_rot_deg) { const cv::Size 
img_size(1280, 640); cv::Mat img(img_size, CV_8UC3, cv::Scalar(0, 0, 0)); const Eigen::Vector3f local_obb_center(0.0f, 0.0f, 0.0f); // OBB在局部坐标系的中心 // 1. 计算投影方向和相机坐标系 Eigen::Vector3f proj_dir = local_obb_center - cam_pos; const float dir_len = proj_dir.norm(); if (dir_len < 1e-6f) { proj_dir = Eigen::Vector3f::UnitZ(); // 避免除以零 } else { proj_dir.normalize(); } const Eigen::Vector3f world_up(0.0f, 0.0f, 1.0f); Eigen::Vector3f proj_x = world_up.cross(proj_dir); if (proj_x.norm() < 1e-6f) { proj_x = Eigen::Vector3f::UnitX(); // 避免退化 } else { proj_x.normalize(); } Eigen::Vector3f proj_y = proj_dir.cross(proj_x); proj_y.normalize(); // 2. 计算点云的投影坐标(用于绘制点) std::vector<Eigen::Vector3f> proj_pts; proj_pts.reserve(local_cloud->size()); Eigen::Matrix3f rotation = Eigen::Matrix3f::Identity(); if (std::fabs(cam_rot_deg) > 1e-6f) { const float angle_rad = cam_rot_deg * M_PI / 180.0f; rotation << std::cos(angle_rad), -std::sin(angle_rad), 0.0f, std::sin(angle_rad), std::cos(angle_rad), 0.0f, 0.0f, 0.0f, 1.0f; } for (const auto& pt : local_cloud->points) { const Eigen::Vector3f pt_eigen(pt.x, pt.y, pt.z); const Eigen::Vector3f pt_cam = pt_eigen - cam_pos; Eigen::Vector3f temp_point( pt_cam.dot(proj_x), pt_cam.dot(proj_y), pt_cam.dot(proj_dir) ); if (std::fabs(cam_rot_deg) > 1e-6f) { temp_point = rotation * temp_point; } proj_pts.push_back(temp_point); } // 3. 计算OBB角点的投影坐标(用于计算缩放比例和绘制边框) const Eigen::Vector3f half_ext = obb.extent / 2.0f; std::vector<Eigen::Vector3f> local_corners = { {half_ext.x(), half_ext.y(), half_ext.z()}, {half_ext.x(), half_ext.y(), -half_ext.z()}, {half_ext.x(), -half_ext.y(), half_ext.z()}, {half_ext.x(), -half_ext.y(), -half_ext.z()}, {-half_ext.x(), half_ext.y(), half_ext.z()}, {-half_ext.x(), half_ext.y(), -half_ext.z()}, {-half_ext.x(), -half_ext.y(), half_ext.z()}, {-half_ext.x(), -half_ext.y(), -half_ext.z()} }; std::vector<Eigen::Vector3f> corner_proj; for (const auto& corner : local_corners) { const Eigen::Vector3f corner_cam = corner - cam_pos; Eigen::Vector3f temp_point( corner_cam.dot(proj_x), corner_cam.dot(proj_y), corner_cam.dot(proj_dir) ); if (std::fabs(cam_rot_deg) > 1e-6f) { temp_point = rotation * temp_point; } corner_proj.push_back(temp_point); } // 4. 关键修改:用OBB角点的投影范围计算缩放比例(确保同一OBB大小一致) float obb_x_min = 1e9f, obb_x_max = -1e9f; float obb_y_min = 1e9f, obb_y_max = -1e9f; for (const auto& cp : corner_proj) { obb_x_min = std::min(obb_x_min, cp.x()); obb_x_max = std::max(obb_x_max, cp.x()); obb_y_min = std::min(obb_y_min, cp.y()); obb_y_max = std::max(obb_y_max, cp.y()); } const float obb_max_range = std::max(obb_x_max - obb_x_min, obb_y_max - obb_y_min); const float safe_obb_range = (obb_max_range < 1e-6f) ? 1e-6f : obb_max_range; const float scale = std::min(img_size.width, img_size.height) * 0.8f / safe_obb_range; // 0.8是预留边距(避免OBB边缘超出图像),可根据需要调整 // 5. 投影中心与OBB中心对齐(确保OBB在图像中居中) Eigen::Vector3f center_cam = local_obb_center - cam_pos; Eigen::Vector3f proj_center( center_cam.dot(proj_x), center_cam.dot(proj_y), center_cam.dot(proj_dir) ); if (std::fabs(cam_rot_deg) > 1e-6f) { proj_center = rotation * proj_center; } const cv::Point img_center(img_size.width / 2, img_size.height / 2); // 6. 
绘制点云 for (const auto& p : proj_pts) { const int x = static_cast<int>(img_center.x + (p.x() - proj_center.x()) * scale); const int y = static_cast<int>(img_center.y - (p.y() - proj_center.y()) * scale); const int clamped_x = std::clamp(x, 0, img_size.width - 1); const int clamped_y = std::clamp(y, 0, img_size.height - 1); cv::drawMarker(img, cv::Point(clamped_x, clamped_y), cv::Scalar(255, 255, 255), 1, // marker大小 1); // 线宽 } // 7. 处理OBB角点并绘制边框 std::vector<int> corner_indices = {0, 1, 2, 3, 4, 5, 6, 7}; // 按Z轴(深度)排序,只保留前4个可见角点 std::sort(corner_indices.begin(), corner_indices.end(), [&](int a, int b) { return corner_proj[a].z() > corner_proj[b].z(); }); corner_indices.resize(4); // 按角度排序,确保边框绘制顺序正确 Eigen::Vector3f centroid(0.0f, 0.0f, 0.0f); for (int idx : corner_indices) { centroid += corner_proj[idx]; } centroid /= 4.0f; std::sort(corner_indices.begin(), corner_indices.end(), [&](int a, int b) { const float ang_a = std::atan2(corner_proj[a].y() - centroid.y(), corner_proj[a].x() - centroid.x()); const float ang_b = std::atan2(corner_proj[b].y() - centroid.y(), corner_proj[b].x() - centroid.x()); return ang_a < ang_b; }); // 映射角点到图像坐标 out_corners.clear(); for (int idx : corner_indices) { const int x = static_cast<int>(img_center.x + (corner_proj[idx].x() - proj_center.x()) * scale); const int y = static_cast<int>(img_center.y - (corner_proj[idx].y() - proj_center.y()) * scale); out_corners.emplace_back(std::clamp(x, 0, img_size.width - 1), std::clamp(y, 0, img_size.height - 1)); } // 绘制OBB边框 for (size_t i = 0; i < 4; ++i) { cv::line(img, out_corners[i], out_corners[(i + 1) % 4], cv::Scalar(0, 255, 0), 2, cv::LINE_AA); } // 8. 绘制OBB中心点(红点) Eigen::Vector3f temp_center = proj_center; // 已与OBB中心对齐 const int cx = static_cast<int>(img_center.x + (temp_center.x() - proj_center.x()) * scale); const int cy = static_cast<int>(img_center.y - (temp_center.y() - proj_center.y()) * scale); out_center = cv::Point(std::clamp(cx, 0, img_size.width - 1), std::clamp(cy, 0, img_size.height - 1)); cv::circle(img, out_center, 5, cv::Scalar(0, 0, 255), -1); return img; } // 内存级接口实现(调用内部函数时显式指定Internal::) int get_proj_results( const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud, const std::vector<ObbData>& obb_data, ProjResult* results, bool use_all_points) { try { if (!cloud || cloud->empty()) return -1; if (obb_data.empty()) return -2; if (!results) return -3; // 显式调用Internal::parse_obb const Internal::OrientedBBox obb = Internal::parse_obb(obb_data); if (obb.extent.norm() < 1e-6f) return -4; pcl::PointCloud<pcl::PointXYZ>::Ptr processed_cloud; if (use_all_points) { processed_cloud = cloud->makeShared(); } else { const std::vector<int> in_obb_indices = obb.get_points_in_box(cloud); std::cout << "------------------------???" 
<< in_obb_indices.size() << std::endl; if (in_obb_indices.empty()) return -5; pcl::PointCloud<pcl::PointXYZ>::Ptr in_obb_pcd(new pcl::PointCloud<pcl::PointXYZ>()); pcl::ExtractIndices<pcl::PointXYZ> extractor; pcl::PointIndices::Ptr inliers(new pcl::PointIndices()); inliers->indices = in_obb_indices; extractor.setInputCloud(cloud); extractor.setIndices(inliers); extractor.setNegative(false); extractor.filter(*in_obb_pcd); processed_cloud = in_obb_pcd; } pcl::PointCloud<pcl::PointXYZ>::Ptr local_cloud(new pcl::PointCloud<pcl::PointXYZ>()); pcl::transformPointCloud(*processed_cloud, *local_cloud, obb.get_global_to_local()); const Eigen::Vector3f half_ext = obb.extent / 2.0f; const float cam_distance = 15.0f; const std::vector<Eigen::Vector3f> cam_positions = { {half_ext.x() + cam_distance, 0.0f, 0.0f}, {0.0f, half_ext.y() + cam_distance, 0.0f}, {0.0f, 0.0f, half_ext.z() + cam_distance} }; const float cam_rotations[] = {0.0f, 0.0f, 90.0f}; for (size_t i = 0; i < 3; ++i) { std::vector<cv::Point> obb_corners; cv::Point obb_center; // 显式调用Internal::gen_proj_internal const cv::Mat proj_img = Internal::gen_proj_internal( local_cloud, obb, cam_positions[i], obb_corners, obb_center, cam_rotations[i] ); if (proj_img.empty()) return -6; results[i].img = proj_img.clone(); results[i].obb_corners = obb_corners; results[i].obb_center = obb_center; } return 0; } catch (...) { return -99; } } // 文件级接口实现 int gen_proj_images( const char* pcd_path, const char* json_path, bool use_all_points) { try { if (!pcd_path || !json_path || strlen(pcd_path) == 0 || strlen(json_path) == 0) return -1; if (!std::ifstream(pcd_path).good()) return -2; if (!std::ifstream(json_path).good()) return -3; pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>()); if (pcl::io::loadPCDFile<pcl::PointXYZ>(pcd_path, *cloud) == -1) return -4; if (cloud->empty()) return -5; std::vector<ObbData> obb_data_list; json j; std::ifstream json_file(json_path); json_file >> j; json obb_json_array; if (j.is_object()) { obb_json_array.push_back(j); } else if (j.is_array()) { obb_json_array = j; } else { return -6; } for (const auto& item : obb_json_array) { ObbData obb_data; obb_data.pos_x = item.at("position").at("x").get<float>(); obb_data.pos_y = item.at("position").at("y").get<float>(); obb_data.pos_z = item.at("position").at("z").get<float>(); obb_data.rot_x = item.at("rotation").at("x").get<float>(); obb_data.rot_y = item.at("rotation").at("y").get<float>(); obb_data.rot_z = item.at("rotation").at("z").get<float>(); obb_data.scale_x = item.at("scale").at("x").get<float>(); obb_data.scale_y = item.at("scale").at("y").get<float>(); obb_data.scale_z = item.at("scale").at("z").get<float>(); obb_data.obj_type = item.value("obj_type", ""); obb_data_list.push_back(obb_data); } ProjResult results[3]; int ret = get_proj_results(cloud, obb_data_list, results, use_all_points); if (ret != 0) return ret; const std::string save_paths[3] = { "OBB_Local_X+_View_Original.png", "OBB_Local_Y+_View_Original.png", "OBB_Local_Z+_View_Rotated90deg.png" }; for (size_t i = 0; i < 3; ++i) { if (!cv::imwrite(save_paths[i], results[i].img)) { free_proj_results(results); return -7; } } free_proj_results(results); return 0; } catch (...) { return -99; } } // 内存释放接口 void free_proj_results(ProjResult* results) { if (results) { for (int i = 0; i < 3; ++i) { results[i].img.release(); results[i].obb_corners.clear(); } } } 这个代码还是会导致boxwidget 变形
最新发布
10-26