1 Problem
For this assignment, you are asked to implement the localization algorithm of an automated valet parking (AVP) system. The assignment provides a dataset of segmented inverse perspective mapping (IPM) images captured from simulation. You need to implement the localization processing and display the final AVP localization result. Please complete the imageRegistration() function in src/avp_localization.cpp.
2 Solution
The key steps of the task are:
(1) Perform data association with the nearest-neighbor search function nearestKSearch() provided by the pcl library.
(2) Compute the centroids of the two point clouds.
(3) Compute the centroid-removed (de-centered) point clouds.
(4) Compute the matrix $H$ from the centroid-removed points,
$$H = \sum_i p_{src}^i (p_{dst}^i)^T \tag{1}$$
where $p_{src}^i$ and $p_{dst}^i$ are the centroid-removed source (current frame) and target (map) points.
(5) Apply a singular value decomposition to $H$,
$$H = U \Sigma V^T \tag{2}$$
(6) Compute the rotation matrix $R$,
$$R = V \begin{bmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & \det(VU^T) \end{bmatrix} U^T \tag{3}$$
(7) Compute the translation vector $t$,
$$t = \bar{p}_{dst} - R \cdot \bar{p}_{src} \tag{4}$$
where $\bar{p}_{src}$ and $\bar{p}_{dst}$ are the centroids from step (2). A standalone sketch of steps (2)–(7) follows this list.
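Steps (2)–(7) are the standard SVD-based closed-form solution (Kabsch/Umeyama) for rigid alignment given known correspondences. Below is a minimal standalone sketch with Eigen; the function name alignSvd and the plain std::vector interface are illustrative assumptions and are not part of the assignment code.

#include <Eigen/Dense>
#include <utility>
#include <vector>

// Closed-form rigid alignment of paired points (Eq. (1)-(4)).
// src[i] corresponds to dst[i]; returns (R, t) such that dst[i] ≈ R * src[i] + t.
// Assumes non-empty inputs of equal size.
std::pair<Eigen::Matrix3f, Eigen::Vector3f> alignSvd(
    const std::vector<Eigen::Vector3f> &src,
    const std::vector<Eigen::Vector3f> &dst) {
  // (2) centroids of both clouds
  Eigen::Vector3f c_src = Eigen::Vector3f::Zero();
  Eigen::Vector3f c_dst = Eigen::Vector3f::Zero();
  for (size_t i = 0; i < src.size(); ++i) {
    c_src += src[i];
    c_dst += dst[i];
  }
  c_src /= static_cast<float>(src.size());
  c_dst /= static_cast<float>(dst.size());
  // (3)(4) accumulate H over the centroid-removed points, Eq. (1)
  Eigen::Matrix3f H = Eigen::Matrix3f::Zero();
  for (size_t i = 0; i < src.size(); ++i) {
    H += (src[i] - c_src) * (dst[i] - c_dst).transpose();
  }
  // (5) SVD of H, Eq. (2)
  Eigen::JacobiSVD<Eigen::Matrix3f> svd(H, Eigen::ComputeFullU | Eigen::ComputeFullV);
  const Eigen::Matrix3f U = svd.matrixU();
  const Eigen::Matrix3f V = svd.matrixV();
  // (6) rotation, Eq. (3); the det(VU^T) term guards against a reflection
  Eigen::Vector3f d(1.0f, 1.0f, (V * U.transpose()).determinant());
  Eigen::Matrix3f R = V * d.asDiagonal() * U.transpose();
  // (7) translation, Eq. (4)
  Eigen::Vector3f t = c_dst - R * c_src;
  return {R, t};
}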
The complete code is as follows:
// Registration frame to map
// Returns T_world_vehicle, the transform from the vehicle frame to the world frame
Eigen::Affine3d AvpLocalization::imageRegistration() {
auto slot_num = curr_frame_.getSemanticElement(SemanticLabel::kSlot).size();
auto dash_num = slot_num + curr_frame_.getSemanticElement(SemanticLabel::kDashLine).size();
auto total_num = dash_num + curr_frame_.getSemanticElement(SemanticLabel::kArrowLine).size();
//total_num is the total number of slot, dash-line and arrow-line points
std::vector<char> correspondences; // 1 indicates the point at this index has a valid correspondence
pcl::PointCloud<pcl::PointXYZ> cloud_in; // points in map
//cloud_in holds the matched points from the map
pcl::PointCloud<pcl::PointXYZ> cloud_out, cloud_est; // points in frame
//cloud_out holds the points of the current frame
// get points in frame
cloud_out.reserve(total_num);
for (const auto &grid :
curr_frame_.getSemanticElement(SemanticLabel::kSlot)) {
cloud_out.emplace_back(grid.x(), grid.y(), 0);
}
for (const auto &grid :
curr_frame_.getSemanticElement(SemanticLabel::kDashLine)) {
cloud_out.emplace_back(grid.x(), grid.y(), 0);
}
for (const auto &grid :
curr_frame_.getSemanticElement(SemanticLabel::kArrowLine)) {
cloud_out.emplace_back(grid.x(), grid.y(), 0);
}
cloud_in.resize(cloud_out.size());
Eigen::Matrix3f R = Eigen::Matrix3f::Identity();
Eigen::Vector3f t = Eigen::Vector3f(0, 0, 0);
const int max_iter = 10;
for (int n = 0; n < max_iter; n++) {
Eigen::Affine3f T_est = Eigen::Translation3f(t) * R;
pcl::transformPointCloud(cloud_out, cloud_est, T_est);
//cloud_out is the input cloud to be transformed
//cloud_est is the output cloud after applying the current estimate T_est
// TODO: finding correspondences by kd-tree nearest neighbor search
correspondences.assign(total_num, 0); // reset the flags every iteration (assign, not resize, so stale flags are cleared)
//Do a nearest-neighbor search with the kd-tree
//Fill cloud_in:
//for each frame point, find the closest point in the map avp_map_ and store it in cloud_in
for (int i = 0; i < cloud_est.size(); ++i) {
pcl::PointXYZ pt = cloud_est[i];
int k = 1;
std::vector<int> pointIdxNKNSearch(k);
std::vector<float> pointNKNSquaredDistance(k);
if (kdtree_all_.nearestKSearch(pt, k, pointIdxNKNSearch, pointNKNSquaredDistance) > 0) {
int nearestIdx = pointIdxNKNSearch[0];
float nearestDistance = pointNKNSquaredDistance[0];
pcl::PointXYZ nearestPoint = all_cloud_in_->points[nearestIdx];
//Optionally, pairs whose nearest-neighbor distance exceeds a threshold could be rejected to filter outlier correspondences
cloud_in[i] = nearestPoint;
correspondences[i] = 1;
}
}
//cloud_in now holds the associated map points
// size_t num_pts = 0;
// Eigen::Vector3f sum_point_in = Eigen::Vector3f::Zero();
// Eigen::Vector3f sum_point_out = Eigen::Vector3f::Zero();
// for (size_t i = 0; i < total_num; ++i) {
// if (correspondences[i]) {
// ++num_pts;
// sum_point_in += Eigen::Vector3f(cloud_in[i].x, cloud_in[i].y, cloud_in[i].z);
// sum_point_out += Eigen::Vector3f(cloud_out[i].x, cloud_out[i].y, cloud_out[i].z);
// }
// }
// Eigen::Vector3f u_point_in = sum_point_in / (num_pts + 1e-10);
// Eigen::Vector3f u_point_out = sum_point_out / (num_pts + 1e-10);
// std::cout << "\ricp itr " << n + 1 << " / 10, pts: " << num_pts
// << ", dYaw: " << GetYaw(Eigen::Quaternionf(R)) * kToDeg
// << ", trans: " << t.transpose() * float(kPixelScale);
// std::cout.flush();
// TODO: calculate closed-form R and t
//Source cloud: cloud_est, the (already transformed) points of the current frame
//Target cloud: cloud_in, the associated map points
//Solve for the incremental transform that maps cloud_est onto cloud_in, i.e. refines the current frame-to-map estimate
//(1) Compute the centroids
Eigen::Vector3f average_pt_1 = Eigen::Vector3f::Zero();
Eigen::Vector3f average_pt_2 = Eigen::Vector3f::Zero();
int num_pts = 0;
for (int i = 0; i < cloud_in.size(); ++i) {
if (correspondences[i]) {
++num_pts;
average_pt_1 += Eigen::Vector3f(cloud_est[i].x, cloud_est[i].y, cloud_est[i].z);
average_pt_2 += Eigen::Vector3f(cloud_in[i].x, cloud_in[i].y, cloud_in[i].z);
}
}
average_pt_1 /= (num_pts + 1e-10);
average_pt_2 /= (num_pts + 1e-10);
//(2) Compute the centroid-removed coordinates
std::vector<Eigen::Vector3f> cloud1, cloud2;
for (int i = 0; i < cloud_in.size(); ++i) {
if (correspondences[i]) {
Eigen::Vector3f pt1 = Eigen::Vector3f(cloud_est[i].x, cloud_est[i].y, cloud_est[i].z);
pt1 -= average_pt_1;
cloud1.emplace_back(pt1);
Eigen::Vector3f pt2 = Eigen::Vector3f(cloud_in[i].x, cloud_in[i].y, cloud_in[i].z);
pt2 -= average_pt_2;
cloud2.emplace_back(pt2);
}
}
//(3) Build the 3x3 matrix H = sum_i src_i * dst_i^T, Eq. (1)
Eigen::Matrix3f H = Eigen::Matrix3f::Zero();
for (int i = 0; i < cloud1.size(); ++i) {
H += cloud1[i] * cloud2[i].transpose();
}
//Apply the singular value decomposition to H, Eq. (2)
Eigen::JacobiSVD<Eigen::Matrix3f> svd(H, Eigen::ComputeFullU | Eigen::ComputeFullV);
//U: left singular vectors
Eigen::Matrix3f U = svd.matrixU();
//V: right singular vectors
Eigen::Matrix3f V = svd.matrixV();
float dev1 = (V * U.transpose()).determinant();
Eigen::Vector3f diag_elements(1.0, 1.0, dev1);
Eigen::Matrix3f diag_matrix = diag_elements.asDiagonal();
//(4) Recover the incremental rotation R1 and translation t1, Eq. (3) and (4); the det(VU^T) term guards against a reflection
Eigen::Matrix3f R1 = V * diag_matrix * U.transpose();
Eigen::Vector3f t1 = average_pt_2 - R1 * average_pt_1;
//Accumulate the increment into the running estimate: T_new = dT * T_old, i.e. R <- R1 * R, t <- R1 * t + t1
R = R1 * R;
t = R1 * t + t1;
//
}
std::cout << std::endl;
t *= float(kPixelScale); // grid scale to world scale
Eigen::Affine3d dT =
Eigen::Translation3d(t.cast<double>()) * R.cast<double>();
Eigen::Affine3d T_world_vehicle =
dT * curr_frame_.T_world_ipm_ * T_vehicle_ipm_.inverse();
return T_world_vehicle;
}
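For reference, the kd-tree data association of step (1) can also be exercised in isolation. The sketch below assumes a small synthetic map cloud and a pcl::KdTreeFLANN, which exposes the same nearestKSearch() interface used by kdtree_all_ above; it is an illustration, not part of the assignment code.

#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <vector>

int main() {
  // Assumed synthetic "map" cloud, for illustration only.
  pcl::PointCloud<pcl::PointXYZ>::Ptr map_cloud(new pcl::PointCloud<pcl::PointXYZ>);
  map_cloud->emplace_back(0.0f, 0.0f, 0.0f);
  map_cloud->emplace_back(1.0f, 0.0f, 0.0f);
  map_cloud->emplace_back(0.0f, 1.0f, 0.0f);

  pcl::KdTreeFLANN<pcl::PointXYZ> kdtree;
  kdtree.setInputCloud(map_cloud);

  // Query point from the current frame.
  pcl::PointXYZ query(0.9f, 0.1f, 0.0f);
  const int k = 1;
  std::vector<int> indices(k);
  std::vector<float> sq_distances(k);
  if (kdtree.nearestKSearch(query, k, indices, sq_distances) > 0) {
    // indices[0] is the index of the closest map point and sq_distances[0] its
    // squared distance; a threshold on this distance can be used to reject
    // outlier correspondences.
  }
  return 0;
}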
The final localization result is shown in the figure below.