-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathtestVisualSLAM.cpp
More file actions
140 lines (111 loc) · 6.07 KB
/
testVisualSLAM.cpp
File metadata and controls
140 lines (111 loc) · 6.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
#include "VisualSLAM.h"
#include "Viewer.h"
#include "GlobalParam.h"

#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
int main(int argc, char** argv){
if (argc < 5){
std::cout << "Usage: ./slam <input_left_image_directory> <input_right_image_directory> <camera_intrinsics_file_path> <num_images> [ground_truth_data_path]" << std::endl;
exit(1);
}
std::string input_left_images_path = argv[1];
std::string input_right_images_path = argv[2];
std::string camera_intrinsics_path = argv[3];
int num_images = std::stoi(argv[4]);
std::string image_name_template = "00000";
if (num_images <= 0)
{
throw std::runtime_error("The number of image pairs is invalid");
}
VisualSLAM slam;
slam.readCameraIntrisics(camera_intrinsics_path);
std::vector<Sophus::SE3d> groundTruthPoses;
if (argc >= 6){
std::string ground_truth_path = argv[5];
slam.readGroundTruthData(ground_truth_path, num_images, groundTruthPoses);
}
cv::Mat window = cv::Mat::zeros(1000, 1000, CV_8UC3);
cv::Mat prevImageLeft, prevImageRight;
std::vector<cv::Point2f> pointsCurrentFrame_left, pointsPrevFrame_left;
std::vector<cv::Point2f> pointsCurrentFrame_right, pointsPrevFrame_right;
std::vector<cv::KeyPoint> keypoints_left, keypoints_right;
cv::Mat descriptors_left,descriptors_right;
Eigen::Matrix3d cumR = Eigen::Matrix3d::Identity();
int k = 1;
std::vector<cv::Mat> images_left(num_images), images_right(num_images);
for (int i = 0; i < num_images; i++){
if (i == std::pow(10, k)){
image_name_template = image_name_template.substr(0, image_name_template.length() - 1);
k++;
}
std::string image_left_name = input_left_images_path + image_name_template + std::to_string(i) + ".png";
std::string image_right_name = input_right_images_path + image_name_template + std::to_string(i) + ".png";
cv::Mat image_left = cv::imread(image_left_name, 0);
cv::Mat image_right = cv::imread(image_right_name, 0);
if (image_left.cols == 0 || image_left.rows == 0){
throw std::runtime_error("Cannot read the image with the path: " + image_left_name);
}
if (image_right.cols == 0 || image_right.rows == 0){
throw std::runtime_error("Cannot read the image with the path: " + image_right_name);
}
image_left.copyTo(images_left[i]);
image_right.copyTo(images_right[i]);
}
Viewer* viewer = new Viewer(slam);
std::thread* viewerThread = new std::thread(&Viewer::run, viewer);
float baseline = 0.53716;
VisualizationToolkit* visToolkit = new VisualizationToolkit(slam.getCameraMatrix(), baseline);
//std::thread* disparityThread = new std::thread(&VisualizationToolkit::computeAndShowPointCloud, visToolkit);
int keyFrameStep = 1;
int numKeyFrames = 10;
Eigen::Vector3d translGTAccumulated, translEstimAccumulatedLeft, translEstimAccumulatedRight;
for (int i = 0; i < num_images; i++){
cv::Mat disparity_map = slam.getDisparityMap(images_left[i], images_right[i]);
Sophus::SE3d pose_left = slam.performFrontEndStepWithTracking(images_left[i], disparity_map, pointsCurrentFrame_left, pointsPrevFrame_left, prevImageLeft, true);
Sophus::SE3d pose_right = slam.performFrontEndStepWithTracking(images_right[i], disparity_map, pointsCurrentFrame_right, pointsPrevFrame_right, prevImageRight, false);
//Sophus::SE3d pose_left = slam.performFrontEndStep(images_left[i], disparity_map, keypoints_left, descriptors_left, true);
//Sophus::SE3d pose_right = slam.performFrontEndStep(images_right[i], disparity_map, keypoints_right, descriptors_right, false);
visToolkit->plot2DPoints(images_right[i], pointsCurrentFrame_right);
visToolkit->setDataForPointCloudVisualization(images_left[i], disparity_map);
/*if (i % (keyFrameStep*numKeyFrames) == 0 && i > 0){
//if (i >= (keyFrameStep*numKeyFrames)){
if (slam.performPoseGraphOptimization(keyFrameStep, numKeyFrames)){
std::cout << "Pose Graph is SUCCESSFULL!" << std::endl;
}
}*/
Sophus::SE3d cumPoseLeft, cumPoseRight;
if (i != 0){
cumPoseRight = slam.getPose_right(i);
cumPoseLeft = slam.getPose_left(i);
}
#ifdef VIS_TRAJECTORY
if (!groundTruthPoses.empty() && i < groundTruthPoses.size()){
if (i == 0){
Sophus::SE3d groundTruthPrevPose = Sophus::SE3d(Eigen::Matrix3d::Identity(), Eigen::Vector3d(0,0,0));
visToolkit->plotTrajectoryNextStep(window, i, translGTAccumulated, translEstimAccumulatedLeft, translEstimAccumulatedRight, groundTruthPoses[i], groundTruthPrevPose, pose_left, pose_right, pose_left, pose_right);
//} else if (i >= (keyFrameStep*numKeyFrames)){
} else if (i % (keyFrameStep*numKeyFrames) == 0){
std::vector<Sophus::SE3d> cumPosesRight = slam.getPoses_right();
std::vector<Sophus::SE3d> cumPosesLeft = slam.getPoses_left();
visToolkit->replotTrajectory(window, i, translGTAccumulated, translEstimAccumulatedLeft, translEstimAccumulatedRight, cumPosesLeft, cumPosesRight, groundTruthPoses);
} else {
std::cout << "Frame " << i << " / " << groundTruthPoses.size() << std::endl;
Sophus::SE3d prevCumPoseLeft = slam.getPose_left(i-1);
Sophus::SE3d prevCumPoseRight = slam.getPose_right(i-1);
visToolkit->plotTrajectoryNextStep(window, i, translGTAccumulated, translEstimAccumulatedLeft, translEstimAccumulatedRight, groundTruthPoses[i], groundTruthPoses[i-1], cumPoseLeft, cumPoseRight, prevCumPoseLeft, prevCumPoseRight);
}
cv::imshow("Trajectory", window);
cv::waitKey(3);
}
#endif
}
#ifdef VIS_POSES
visToolkit->visualizeAllPoses(slam.getPoses_left(), slam.getCameraMatrix());
#endif
viewerThread->join();
//disparityThread->join();
delete viewer;
delete visToolkit;
cv::imwrite("result_trajectories.png", window);
return 0;
}