加入收藏 | 设为首页 | 会员中心 | 我要投稿 李大同 (https://www.lidatong.com.cn/)- 科技、建站、经验、云计算、5G、大数据,站长网!
当前位置: 首页 > 百科 > 正文

手推 Bundle Adjustment(2)--根据两幅图像求位姿优化的C++实现

发布时间:2020-12-15 04:52:04 所属栏目:百科 来源:网络整理
导读:需要使用的第三方库: opencv Eigen3 Sophus 解决问题:根据两幅RGB图像和对应的深度图像,通过特征点匹配+PnP估计变化的位姿,然后在通过Bundle Adjustment来优化位姿。 // // Created by wpr on 18-12-19. // #include #include #include #include #includ

需要使用的第三方库:


opencv Eigen3 Sophus


解决问题:根据两幅RGB图像和对应的深度图像,通过特征点匹配+PnP估计变化的位姿,然后再通过Bundle Adjustment来优化位姿。

//

// Created by wpr on 18-12-19.

//

#include

#include

#include

#include

#include

#include

#include

#include "sophus/se3.h"

using namespace cv;

using namespace std;

using namespace Eigen;

typedef vector> VecVector3d;

typedef vector> VecVector2d;

typedef Matrix Vector6d;

int main()

{

cout << "Hello BA~" << endl;

VecVector2d p2d;

VecVector3d p3d;

double fx = 520.9,fy = 521.0,cx = 325.1,cy = 249.7;

Mat img1 = imread("./data/1.png",CV_LOAD_IMAGE_COLOR);

Mat img2 = imread("./data/2.png",CV_LOAD_IMAGE_COLOR);

vector keypoints1;

vector keypoints2;

vector matches;

Mat descriptions1,descriptions2;

Ptr detector = ORB::create();

Ptr descriptor = ORB::create();

Ptr matcher = DescriptorMatcher::create("BruteForce-Hamming");

detector->detect(img1,keypoints1);

detector->detect(img2,keypoints2);

descriptor-> compute(img1,keypoints1,descriptions1);

descriptor-> compute(img2,keypoints2,descriptions2);

vector match;

matcher->match(descriptions1,descriptions2,match);

//匹配点对筛选

double min_dist=10000,max_dist=0;

for(int i = 0; i < descriptions1.rows; i++)

{

double dist = match[i].distance;

if(dist < min_dist)min_dist = dist;

if(dist > max_dist)max_dist = dist;

}

cout << "-- Min dist :" << min_dist << endl;

cout << "-- Max dist :" << max_dist << endl;

for(int i = 0; i < descriptions1.rows; i++)

{

if ( match[i].distance <= max ( 2*min_dist,30.0 ) )

matches.push_back(match[i]);

}

cout << "一共找到了" << matches.size() << "组匹配点" << endl;

Mat depthimg = imread("../data/1_depth.png",CV_LOAD_IMAGE_COLOR);

for(auto m:matches)

{

ushort d = depthimg.ptr(int( keypoints1[m.queryIdx].pt.y))[int (keypoints1[m.queryIdx].pt.x)];

if (d == 0)

continue;

float z = d/5000.0;

float y = ( keypoints1[m.queryIdx].pt.x - cx) * z /fx;

float x = ( keypoints1[m.queryIdx].pt.y - cy) * z /fy;

p2d.push_back(Vector2d(keypoints1[m.queryIdx].pt.x,keypoints1[m.queryIdx].pt.y));

p3d.push_back(Vector3d(x,y,z));

}

int iterator_time = 100;

double cost = 0,lastcost = 0;

int n_points = p3d.size();

cout << "路标点的数量为" << n_points << endl;

Sophus::SE3 T_esti; //camera pose

for (auto iter = 0; iter < iterator_time; iter++)

{

Matrix H = Matrix::Zero();

Vector6d b = Vector6d::Zero();

cost = 0;

//compute cost

for ( int i = 0; i < n_points; i++)

{

Vector2d p2 = p2d[i];

Vector3d p3 = p3d[i];

Vector3d P = T_esti * p3;

double x = P[0];

double y = P[1];

double z = P[2];

Vector2d p2_ = {fx * ( x/z ) + cx,fy * ( y/z ) + cy};

Vector2d e = p2 - p2_;

cost += (e[0]*e[0]+e[1]*e[1]);

// compute jacobian

Matrix J;

J(0,0) = -(fx/z);

J(0,1) = 0;

J(0,2) = (fx*x/(z*z));

J(0,3) = (fx*x*y/(z*z));

J(0,4) = -(fx*x*x/(z*z)+fx);

J(0,5) = (fx*y/z);

J(1,0) = 0;

J(1,1) = -(fy/z);

J(1,2) = (fy*y/(z*z));

J(1,3) = (fy*y*y/(z*z)+fy);

J(1,4) = -(fy*x*y/(z*z));

J(1,5) = -(fy*x/z);

H += J.transpose() * J;

b += -J.transpose() * e;

}

// solve dx

Vector6d dx;

dx = H.ldlt().solve(b);

if (isnan(dx[0])) {

cout << "result is nan!" << endl;

break;

}

if (iter > 0 && cost >= lastcost) {

// 误差增长了,说明近似的不够好

cout << "cost: " << cost << ",last cost: " << lastcost << endl;

break;

}

// 更新估计位姿

T_esti = Sophus::SE3::exp(dx)*T_esti;

lastcost = cost;

cout << "iteration " << iter << " cost=" << cout.precision(12) << cost << endl;

}

return 0;

}

(编辑:李大同)

【声明】本站内容均来自网络,其相关言论仅代表作者个人观点,不代表本站立场。若无意侵犯到您的权利,请及时与联系站长删除相关内容!

    推荐文章
      热点阅读