LOAM source code: https://github.com/cuitaixiang/LOAM_NOTED
Paper notes: LOAM: Lidar Odometry and Mapping in Real-time.
LOAM source code analysis series:
LOAM source code analysis 1: scanRegistration (part 1).
LOAM source code analysis 2: scanRegistration (part 2).
LOAM source code analysis 3: laserOdometry (part 1).
LOAM source code analysis 4: laserOdometry (part 2).
LOAM source code analysis 5: laserOdometry (part 3).
LOAM source code analysis 6: laserMapping (part 1).
LOAM source code analysis 7: laserMapping (part 2).
LOAM source code analysis 8: transformMaintenance.
8. Pose Estimation
Once the feature points have been matched, pose estimation begins. LOAM casts it as a least-squares optimization that is solved iteratively with the LM method, so the key step is deriving the Jacobian matrix.
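For context, each iteration linearizes the loss around the current estimate and solves for a small pose increment $\Delta T$; the LM form adds a damping term $\lambda$, and with $\lambda = 0$ it reduces to the Gauss-Newton step that the code below assembles:

$$\left(J^{T}J + \lambda I\right)\Delta T = -J^{T}\,loss, \qquad T_{k+1} \leftarrow T_{k+1} + \Delta T$$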
1. Jacobian Computation
For the loss function (the loss is the distance computed for each feature point; in the code it is stored in the matrix $B$):

$$loss = F(\tilde{X}_{k+1},\, T_{k+1})$$
Its Jacobian matrix (the matrix $A$ in the code) is:

$$J = \frac{\partial F}{\partial T_{k+1}} = \frac{\partial F}{\partial \tilde{X}_{k+1}}\,\frac{\partial \tilde{X}_{k+1}}{\partial T_{k+1}}$$
Here $\frac{\partial F}{\partial \tilde{X}_{k+1}}$ has already been obtained during feature matching: for an edge feature it is the unit vector along the point-to-line direction, and for a planar feature it is the unit vector along the point-to-plane direction. As for $\frac{\partial \tilde{X}_{k+1}}{\partial T_{k+1}}$, it can be derived separately with respect to the translation and the rotation; the derivation is not repeated here.
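As a hedged sketch of that second factor (the exact Euler-angle order and signs follow the TransformToStart function covered earlier in this series), if the re-projected point is modeled as $\tilde{X}_{k+1} = R^{-1}(X_{k+1} - t)$ with rotation $R(\theta_x,\theta_y,\theta_z)$ and translation $t$, then

$$\frac{\partial \tilde{X}_{k+1}}{\partial t} = -R^{-1}, \qquad \frac{\partial \tilde{X}_{k+1}}{\partial \theta_j} = \frac{\partial R^{-1}}{\partial \theta_j}\,(X_{k+1} - t),$$

and each row of $A$ is the chain-rule product of $\frac{\partial F}{\partial \tilde{X}_{k+1}}$ (the coeff vector) with these derivatives; expanding the trigonometric terms gives the arx .. atz expressions in the code below.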
So the problem finally reduces to solving:

$$\sum_{i=0}^{pointSelNum} H_i\,X = \sum_{i=0}^{pointSelNum} b_i$$
where:

$$H_i = A_i^T A_i, \qquad b_i = -A_i^T B_i$$
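This is simply the least-squares normal equation: with one $1\times 6$ row $A_i$ and one scalar residual $B_i$ per selected feature point, minimizing the squared linearized residual gives

$$\min_{X}\sum_{i} \lVert A_i X + B_i \rVert^{2} \;\Longrightarrow\; \Big(\sum_{i} A_i^{T}A_i\Big) X = -\sum_{i} A_i^{T}B_i,$$

which in stacked form is $A^{T}A\,X = -A^{T}B$; this is exactly what matAtA and matAtB hold below (in the code, matB already carries the minus sign together with a fixed 0.05 scale on the distance).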
Define the matrices:
cv::Mat matA(pointSelNum, 6, CV_32F, cv::Scalar::all(0));
cv::Mat matAt(6, pointSelNum, CV_32F, cv::Scalar::all(0));
cv::Mat matAtA(6, 6, CV_32F, cv::Scalar::all(0));
cv::Mat matB(pointSelNum, 1, CV_32F, cv::Scalar::all(0));
cv::Mat matAtB(6, 1, CV_32F, cv::Scalar::all(0));
cv::Mat matX(6, 1, CV_32F, cv::Scalar::all(0));
For each feature point, compute its Jacobian row (matrix A) and its loss entry (vector B), and assemble them into one large linear system.
//build the matA and matB matrices
for (int i = 0; i < pointSelNum; i++) {
pointOri = laserCloudOri->points[i];
coeff = coeffSel->points[i];
float s = 1;
float srx = sin(s * transform[0]);
float crx = cos(s * transform[0]);
float sry = sin(s * transform[1]);
float cry = cos(s * transform[1]);
float srz = sin(s * transform[2]);
float crz = cos(s * transform[2]);
float tx = s * transform[3];
float ty = s * transform[4];
float tz = s * transform[5];
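//arx..arz and atx..atz below form one row of A: the chain-rule product of coeff
//(the direction vector dF/dX~ obtained during matching) with the derivatives of the
//transformed point with respect to the six pose parameters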
float arx = (-s*crx*sry*srz*pointOri.x + s*crx*crz*sry*pointOri.y + s*srx*sry*pointOri.z
+ s*tx*crx*sry*srz - s*ty*crx*crz*sry - s*tz*srx*sry) * coeff.x
+ (s*srx*srz*pointOri.x - s*crz*srx*pointOri.y + s*crx*pointOri.z
+ s*ty*crz*srx - s*tz*crx - s*tx*srx*srz) * coeff.y
+ (s*crx*cry*srz*pointOri.x - s*crx*cry*crz*pointOri.y - s*cry*srx*pointOri.z
+ s*tz*cry*srx + s*ty*crx*cry*crz - s*tx*crx*cry*srz) * coeff.z;
float ary = ((-s*crz*sry - s*cry*srx*srz)*pointOri.x
+ (s*cry*crz*srx - s*sry*srz)*pointOri.y - s*crx*cry*pointOri.z
+ tx*(s*crz*sry + s*cry*srx*srz) + ty*(s*sry*srz - s*cry*crz*srx)
+ s*tz*crx*cry) * coeff.x
+ ((s*cry*crz - s*srx*sry*srz)*pointOri.x
+ (s*cry*srz + s*crz*srx*sry)*pointOri.y - s*crx*sry*pointOri.z
+ s*tz*crx*sry - ty*(s*cry*srz + s*crz*srx*sry)
- tx*(s*cry*crz - s*srx*sry*srz)) * coeff.z;
float arz = ((-s*cry*srz - s*crz*srx*sry)*pointOri.x + (s*cry*crz - s*srx*sry*srz)*pointOri.y
+ tx*(s*cry*srz + s*crz*srx*sry) - ty*(s*cry*crz - s*srx*sry*srz)) * coeff.x
+ (-s*crx*crz*pointOri.x - s*crx*srz*pointOri.y
+ s*ty*crx*srz + s*tx*crx*crz) * coeff.y
+ ((s*cry*crz*srx - s*sry*srz)*pointOri.x + (s*crz*sry + s*cry*srx*srz)*pointOri.y
+ tx*(s*sry*srz - s*cry*crz*srx) - ty*(s*crz*sry + s*cry*srx*srz)) * coeff.z;
float atx = -s*(cry*crz - srx*sry*srz) * coeff.x + s*crx*srz * coeff.y
- s*(crz*sry + cry*srx*srz) * coeff.z;
float aty = -s*(cry*srz + crz*srx*sry) * coeff.x - s*crx*crz * coeff.y
- s*(sry*srz - cry*crz*srx) * coeff.z;
float atz = s*crx*sry * coeff.x - s*srx * coeff.y - s*crx*cry * coeff.z;
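//coeff.intensity carries the (weighted) point-to-line / point-to-plane distance from the matching step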
float d2 = coeff.intensity;
matA.at<float>(i, 0) = arx;
matA.at<float>(i, 1) = ary;
matA.at<float>(i, 2) = arz;
matA.at<float>(i, 3) = atx;
matA.at<float>(i, 4) = aty;
matA.at<float>(i, 5) = atz;
matB.at<float>(i, 0) = -0.05 * d2;
}
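Hand-expanded derivatives like the block above are easy to get wrong. A small finite-difference check (a hypothetical standalone helper, not part of the LOAM code) can be used to validate such expressions: it compares an analytic gradient against central differences of the same scalar residual.
#include <cmath>
#include <cstdio>
#include <functional>

// Hypothetical helper: compare an analytic gradient of a scalar residual r(x),
// x = (rx, ry, rz, tx, ty, tz), against central finite differences.
bool checkGradient(const std::function<double(const double*)>& residual,
                   const double x[6], const double analytic[6],
                   double eps = 1e-6, double tol = 1e-4) {
  bool ok = true;
  for (int j = 0; j < 6; j++) {
    double xp[6], xm[6];
    for (int k = 0; k < 6; k++) { xp[k] = x[k]; xm[k] = x[k]; }
    xp[j] += eps;
    xm[j] -= eps;
    double numeric = (residual(xp) - residual(xm)) / (2.0 * eps);  // central difference
    if (std::fabs(numeric - analytic[j]) > tol) {
      std::printf("component %d: analytic %g vs numeric %g\n", j, analytic[j], numeric);
      ok = false;
    }
  }
  return ok;
}
Here residual would evaluate the point-to-line or point-to-plane distance of one feature after applying the frame transform with parameters x, and analytic would be the corresponding row (arx, ary, arz, atx, aty, atz).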
2. Solving the Linear System
The normal equation is solved with an OpenCV call, using QR decomposition:
cv::transpose(matA, matAt);
matAtA = matAt * matA;
matAtB = matAt * matB;
//solve matAtA * matX = matAtB
cv::solve(matAtA, matAtB, matX, cv::DECOMP_QR);
A brief note on OpenCV's solve function; its prototype is as follows:
bool solve(const Mat & src1, const Mat & src2, Mat & dst, int flags = DECOMP_LU);
//src1: the left-hand-side matrix of the linear system
//src2: the right-hand side of the linear system
//dst: the output (solution) matrix
//flags: the solution method
// DECOMP_LU - Gaussian
// DECOMP_CHOLESKY
// DECOMP_EIG
// DECOMP_SVD
// DECOMP_QR
// DECOMP_NORMAL
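As a small, self-contained illustration of the call (unrelated to the LOAM data), solving a 2x2 system with QR decomposition:
#include <cstdio>
#include <opencv2/core.hpp>

int main() {
  // Solve A * x = b with A = [[2, 1], [1, 3]] and b = [3, 5]; the exact solution is x = (0.8, 1.4)
  cv::Mat A = (cv::Mat_<float>(2, 2) << 2, 1, 1, 3);
  cv::Mat b = (cv::Mat_<float>(2, 1) << 3, 5);
  cv::Mat x;
  cv::solve(A, b, x, cv::DECOMP_QR);
  std::printf("x = (%f, %f)\n", x.at<float>(0, 0), x.at<float>(1, 0));
  return 0;
}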
3. Degeneracy Analysis
The author of LOAM, Ji Zhang, published a paper at IROS 2016 on handling degeneracy in optimization problems; for a more detailed treatment see: On Degeneracy of Optimization-based State Estimation Problems.
Simply put, localization degenerates when constraints are lacking. In linear-algebra terms, the Hessian matrix of the incremental equation must be positive definite; so when solving the incremental equation, the eigenvalues and eigenvectors of H can be computed, and the eigenvectors associated with the small eigenvalues give the degenerate directions.
At the first iteration, compute the eigenvalues and eigenvectors of the Hessian matrix.
In OpenCV, the arguments of the eigen function are: the matrix to be decomposed, the output eigenvalues (in descending order), and the matrix of corresponding eigenvectors (one eigenvector per row). As for copyTo: A.copyTo(B) copies A into B.
if (iterCount == 0) {
//eigenvalues, a 1x6 matrix
cv::Mat matE(1, 6, CV_32F, cv::Scalar::all(0));
//eigenvectors, a 6x6 matrix (one eigenvector per row)
cv::Mat matV(6, 6, CV_32F, cv::Scalar::all(0));
cv::Mat matV2(6, 6, CV_32F, cv::Scalar::all(0));
//compute eigenvalues / eigenvectors
cv::eigen(matAtA, matE, matV);
matV.copyTo(matV2);
isDegenerate = false;
If an eigenvalue is smaller than the preset threshold, degeneracy has occurred; the corresponding eigenvector is zeroed out, which amounts to discarding that direction.
//eigenvalue threshold for each of the six dimensions
float eignThre[6] = {10, 10, 10, 10, 10, 10};
for (int i = 5; i >= 0; i--) {//scan from the smallest eigenvalue to the largest
if (matE.at<float>(0, i) < eignThre[i]) {//eigenvalue too small: the environment is degenerate in this direction
for (int j = 0; j < 6; j++) {//zero out the corresponding eigenvector
matV2.at<float>(i, j) = 0;
}
isDegenerate = true;
} else {
break;
}
}
Compute the matrix P used to remap the solution:
//compute the P matrix
matP = matV.inv() * matV2;
}
Use P to correct the increment:
if (isDegenerate) {//if degeneracy occurred, remap the increment through the P matrix
cv::Mat matX2(6, 1, CV_32F, cv::Scalar::all(0));
matX.copyTo(matX2);
matX = matP * matX2;
}
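In equation form: since matAtA is symmetric, the eigenvector matrix V returned by cv::eigen (one eigenvector per row) is orthogonal, so matV.inv() equals $V^{T}$. Writing $V'$ for V with the degenerate rows zeroed, the remapping is

$$X \leftarrow P\,X = V^{T}V'\,X = \sum_{i \,\in\, \text{non-degenerate}} \left(v_i^{T} X\right) v_i,$$

i.e. the increment is expressed in the eigenbasis, its components along the small-eigenvalue directions are dropped, and the rest is mapped back; along degenerate directions the state is simply not updated.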
4. Pose Update
Update the pose with the increment obtained in this iteration.
//accumulate the rotation and translation increment of each iteration
transform[0] += matX.at<float>(0, 0);
transform[1] += matX.at<float>(1, 0);
transform[2] += matX.at<float>(2, 0);
transform[3] += matX.at<float>(3, 0);
transform[4] += matX.at<float>(4, 0);
transform[5] += matX.at<float>(5, 0);
Check that the result is valid (no NaNs) and whether the iteration should stop; since the increment is converted to degrees and centimeters below, the 0.1 threshold corresponds to 0.1 degrees of rotation and 1 mm of translation.
for(int i=0; i<6; i++){
if(isnan(transform[i]))//check for NaN
transform[i]=0;
}
//compute the magnitude of the rotation and translation increments; stop iterating if they are small enough
float deltaR = sqrt(
pow(rad2deg(matX.at<float>(0, 0)), 2) +
pow(rad2deg(matX.at<float>(1, 0)), 2) +
pow(rad2deg(matX.at<float>(2, 0)), 2));
float deltaT = sqrt(
pow(matX.at<float>(3, 0) * 100, 2) +
pow(matX.at<float>(4, 0) * 100, 2) +
pow(matX.at<float>(5, 0) * 100, 2));
if (deltaR < 0.1 && deltaT < 0.1) {//termination condition
break;
}
}
5. Coordinate Transformation
Accumulate the frame-to-frame transform into the pose relative to the initial (world) frame:
float rx, ry, rz, tx, ty, tz;
//accumulate the rotation relative to the origin; the 1.05 factor is an empirical correction in the vertical direction(?)
AccumulateRotation(transformSum[0], transformSum[1], transformSum[2],
-transform[0], -transform[1] * 1.05, -transform[2], rx, ry, rz);
float x1 = cos(rz) * (transform[3] - imuShiftFromStartX)
- sin(rz) * (transform[4] - imuShiftFromStartY);
float y1 = sin(rz) * (transform[3] - imuShiftFromStartX)
+ cos(rz) * (transform[4] - imuShiftFromStartY);
float z1 = transform[5] * 1.05 - imuShiftFromStartZ;
float x2 = x1;
float y2 = cos(rx) * y1 - sin(rx) * z1;
float z2 = sin(rx) * y1 + cos(rx) * z1;
//translation relative to the origin
tx = transformSum[3] - (cos(ry) * x2 + sin(ry) * z2);
ty = transformSum[4] - y2;
tz = transformSum[5] - (-sin(ry) * x2 + cos(ry) * z2);
//correct the rotation using the IMU
PluginIMURotation(rx, ry, rz, imuPitchStart, imuYawStart, imuRollStart,
imuPitchLast, imuYawLast, imuRollLast, rx, ry, rz);
//obtain the transform in the world frame
transformSum[0] = rx;
transformSum[1] = ry;
transformSum[2] = rz;
transformSum[3] = tx;
transformSum[4] = ty;
transformSum[5] = tz;
Convert the Euler angles to a quaternion (note the remapped axes, which follow LOAM's coordinate convention):
geometry_msgs::Quaternion geoQuat = tf::createQuaternionMsgFromRollPitchYaw(rz, -rx, -ry);
Publish the odometry: one message for mapping and one TF broadcast for rviz.
//publish the quaternion and translation
laserOdometry.header.stamp = ros::Time().fromSec(timeSurfPointsLessFlat);
laserOdometry.pose.pose.orientation.x = -geoQuat.y;
laserOdometry.pose.pose.orientation.y = -geoQuat.z;
laserOdometry.pose.pose.orientation.z = geoQuat.x;
laserOdometry.pose.pose.orientation.w = geoQuat.w;
laserOdometry.pose.pose.position.x = tx;
laserOdometry.pose.pose.position.y = ty;
laserOdometry.pose.pose.position.z = tz;
pubLaserOdometry.publish(laserOdometry);
//broadcast the newly transformed frame (for rviz)
laserOdometryTrans.stamp_ = ros::Time().fromSec(timeSurfPointsLessFlat);
laserOdometryTrans.setRotation(tf::Quaternion(-geoQuat.y, -geoQuat.z, geoQuat.x, geoQuat.w));
laserOdometryTrans.setOrigin(tf::Vector3(tx, ty, tz));
tfBroadcaster.sendTransform(laserOdometryTrans);
The less-sharp (edge) and less-flat (planar) feature points are re-projected to the end of the sweep; the full point cloud is re-projected only with frame skipping.
int cornerPointsLessSharpNum = cornerPointsLessSharp->points.size();
for (int i = 0; i < cornerPointsLessSharpNum; i++) {
TransformToEnd(&cornerPointsLessSharp->points[i], &cornerPointsLessSharp->points[i]);
}
int surfPointsLessFlatNum = surfPointsLessFlat->points.size();
for (int i = 0; i < surfPointsLessFlatNum; i++) {
TransformToEnd(&surfPointsLessFlat->points[i], &surfPointsLessFlat->points[i]);
}
frameCount++;
//for the full point cloud, de-skew all points relative to the last point of the sweep only every other frame
if (frameCount >= skipFrameNum + 1) {
int laserCloudFullResNum = laserCloudFullRes->points.size();
for (int i = 0; i < laserCloudFullResNum; i++) {
TransformToEnd(&laserCloudFullRes->points[i], &laserCloudFullRes->points[i]);
}
}
Save the current less-sharp and less-flat feature points as the "last frame" clouds and build kd-trees on them.
//keep the de-skewed points as the "last" clouds, to be matched against when the next sweep arrives
pcl::PointCloud<PointType>::Ptr laserCloudTemp = cornerPointsLessSharp;
cornerPointsLessSharp = laserCloudCornerLast;
laserCloudCornerLast = laserCloudTemp;
laserCloudTemp = surfPointsLessFlat;
surfPointsLessFlat = laserCloudSurfLast;
laserCloudSurfLast = laserCloudTemp;
laserCloudCornerLastNum = laserCloudCornerLast->points.size();
laserCloudSurfLastNum = laserCloudSurfLast->points.size();
//build the kd-trees only if there are enough points; otherwise discard this frame's trees and keep using the previous frame's kd-trees
if (laserCloudCornerLastNum > 10 && laserCloudSurfLastNum > 100) {
kdtreeCornerLast->setInputCloud(laserCloudCornerLast);
kdtreeSurfLast->setInputCloud(laserCloudSurfLast);
}
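For reference, these kd-trees are what the matching step of the next sweep queries with PCL's nearest-neighbor search (covered in the earlier laserOdometry parts). A minimal sketch of the query pattern, with names mirroring the LOAM code:
#include <vector>
#include <pcl/point_types.h>
#include <pcl/kdtree/kdtree_flann.h>

// Sketch only: query the corner kd-tree built above for the single nearest
// neighbor of a (re-projected) feature point from the new sweep.
void sketchNearestCorner(pcl::KdTreeFLANN<pcl::PointXYZI>::Ptr kdtreeCornerLast,
                         const pcl::PointXYZI& pointSel) {
  std::vector<int> pointSearchInd(1);
  std::vector<float> pointSearchSqDis(1);
  kdtreeCornerLast->nearestKSearch(pointSel, 1, pointSearchInd, pointSearchSqDis);
  if (pointSearchSqDis[0] < 25) {  // accept matches within 5 m (squared distance threshold)
    int closestPointInd = pointSearchInd[0];
    (void)closestPointInd;  // used to assemble the edge-line correspondence
  }
}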
Publish the edge points, planar points, and the full cloud to laserMapping according to the frame-skip count (once every other frame):
if (frameCount >= skipFrameNum + 1) {
frameCount = 0;
sensor_msgs::PointCloud2 laserCloudCornerLast2;
pcl::toROSMsg(*laserCloudCornerLast, laserCloudCornerLast2);
laserCloudCornerLast2.header.stamp = ros::Time().fromSec(timeSurfPointsLessFlat);
laserCloudCornerLast2.header.frame_id = "/camera";
pubLaserCloudCornerLast.publish(laserCloudCornerLast2);
sensor_msgs::PointCloud2 laserCloudSurfLast2;
pcl::toROSMsg(*laserCloudSurfLast, laserCloudSurfLast2);
laserCloudSurfLast2.header.stamp = ros::Time().fromSec(timeSurfPointsLessFlat);
laserCloudSurfLast2.header.frame_id = "/camera";
pubLaserCloudSurfLast.publish(laserCloudSurfLast2);
sensor_msgs::PointCloud2 laserCloudFullRes3;
pcl::toROSMsg(*laserCloudFullRes, laserCloudFullRes3);
laserCloudFullRes3.header.stamp = ros::Time().fromSec(timeSurfPointsLessFlat);
laserCloudFullRes3.header.frame_id = "/camera";
pubLaserCloudFullRes.publish(laserCloudFullRes3);
}
}
status = ros::ok();
rate.sleep();
}