In ORB_SLAM3, a number of initialization steps have to be carried out before the final main loop:
rs2::pipeline_profile pipe_profile = pipe.start(cfg, imu_callback);

vector<ORB_SLAM3::IMU::Point> vImuMeas;

rs2::stream_profile cam_left = pipe_profile.get_stream(RS2_STREAM_INFRARED, 1);
rs2::stream_profile cam_right = pipe_profile.get_stream(RS2_STREAM_INFRARED, 2);
rs2::stream_profile imu_stream = pipe_profile.get_stream(RS2_STREAM_GYRO);

// Extrinsics from the left IR camera to the IMU (Tbc)
float* Rbc = cam_left.get_extrinsics_to(imu_stream).rotation;
float* tbc = cam_left.get_extrinsics_to(imu_stream).translation;
std::cout << "Tbc (left) = " << std::endl;
for(int i = 0; i<3; i++){
    for(int j = 0; j<3; j++)
        std::cout << Rbc[i*3 + j] << ", ";
    std::cout << tbc[i] << "\n";
}

// Extrinsics from the right IR camera to the left IR camera (Tlr)
float* Rlr = cam_right.get_extrinsics_to(cam_left).rotation;
float* tlr = cam_right.get_extrinsics_to(cam_left).translation;
std::cout << "Tlr = " << std::endl;
for(int i = 0; i<3; i++){
    for(int j = 0; j<3; j++)
        std::cout << Rlr[i*3 + j] << ", ";
    std::cout << tlr[i] << "\n";
}

// Intrinsics of the left IR camera
rs2_intrinsics intrinsics_left = cam_left.as<rs2::video_stream_profile>().get_intrinsics();
width_img = intrinsics_left.width;
height_img = intrinsics_left.height;
cout << "Left camera: \n";
std::cout << " fx = " << intrinsics_left.fx << std::endl;
std::cout << " fy = " << intrinsics_left.fy << std::endl;
std::cout << " cx = " << intrinsics_left.ppx << std::endl;
std::cout << " cy = " << intrinsics_left.ppy << std::endl;
std::cout << " height = " << intrinsics_left.height << std::endl;
std::cout << " width = " << intrinsics_left.width << std::endl;
std::cout << " Coeff = " << intrinsics_left.coeffs[0] << ", " << intrinsics_left.coeffs[1] << ", "
          << intrinsics_left.coeffs[2] << ", " << intrinsics_left.coeffs[3] << ", " << intrinsics_left.coeffs[4] << ", " << std::endl;
std::cout << " Model = " << intrinsics_left.model << std::endl;

// Intrinsics of the right IR camera
rs2_intrinsics intrinsics_right = cam_right.as<rs2::video_stream_profile>().get_intrinsics();
width_img = intrinsics_right.width;
height_img = intrinsics_right.height;
cout << "Right camera: \n";
std::cout << " fx = " << intrinsics_right.fx << std::endl;
std::cout << " fy = " << intrinsics_right.fy << std::endl;
std::cout << " cx = " << intrinsics_right.ppx << std::endl;
std::cout << " cy = " << intrinsics_right.ppy << std::endl;
std::cout << " height = " << intrinsics_right.height << std::endl;
std::cout << " width = " << intrinsics_right.width << std::endl;
std::cout << " Coeff = " << intrinsics_right.coeffs[0] << ", " << intrinsics_right.coeffs[1] << ", "
          << intrinsics_right.coeffs[2] << ", " << intrinsics_right.coeffs[3] << ", " << intrinsics_right.coeffs[4] << ", " << std::endl;
std::cout << " Model = " << intrinsics_right.model << std::endl;

// Create SLAM system. It initializes all system threads and gets ready to process frames.
ORB_SLAM3::System SLAM(argv[1], argv[2], ORB_SLAM3::System::IMU_STEREO, true, 0, file_name);
float imageScale = SLAM.GetImageScale();

double timestamp;
cv::Mat im, imRight;

// Clear IMU vectors
v_gyro_data.clear();
v_gyro_timestamp.clear();
v_accel_data_sync.clear();
v_accel_timestamp_sync.clear();

double t_resize = 0.f;
double t_track = 0.f;
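For orientation, here is a condensed sketch of how these objects are used in the main loop that follows. It assumes im, imRight and timestamp have already been filled from the frames delivered by the callback, and that the gyro and synchronized accelerometer buffers are the ones cleared above; the return type of TrackStereo differs between ORB_SLAM3 versions, so it is simply ignored here.

// Condensed sketch of one iteration of the tracking loop (not the full example code).
vImuMeas.clear();
for (size_t i = 0; i < v_gyro_data.size(); ++i)
{
    // ORB_SLAM3::IMU::Point(ax, ay, az, gx, gy, gz, t), timestamps in seconds
    vImuMeas.push_back(ORB_SLAM3::IMU::Point(
        v_accel_data_sync[i].x, v_accel_data_sync[i].y, v_accel_data_sync[i].z,
        v_gyro_data[i].x, v_gyro_data[i].y, v_gyro_data[i].z,
        v_gyro_timestamp[i] * 1e-3));   // RealSense timestamps are in milliseconds
}

// Feed the stereo pair plus the IMU measurements accumulated since the
// previous frame to the SLAM system.
SLAM.TrackStereo(im, imRight, timestamp, vImuMeas);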
2.1 Getting the data streams from the pipeline
rs2::pipeline_profile pipe_profile = pipe.start(cfg, imu_callback);
vector<ORB_SLAM3::IMU::Point> vImuMeas;
rs2::stream_profile cam_left = pipe_profile.get_stream(RS2_STREAM_INFRARED, 1);
rs2::stream_profile cam_right = pipe_profile.get_stream(RS2_STREAM_INFRARED, 2);
rs2::stream_profile imu_stream = pipe_profile.get_stream(RS2_STREAM_GYRO);
This code combines the Intel RealSense SDK 2.0 with the ORB_SLAM3 visual SLAM library. The individual parts:

rs2::pipeline_profile pipe_profile = pipe.start(cfg, imu_callback); starts the RealSense pipeline. cfg is the device configuration; it specifies which streams (e.g. color, depth, IMU) should be enabled and with which parameters. imu_callback is a callback function that is invoked whenever new IMU data arrives; it processes the IMU samples and buffers them for the SLAM system.

vector<ORB_SLAM3::IMU::Point> vImuMeas; declares the vector that will hold the IMU measurements handed to ORB_SLAM3.

rs2::stream_profile cam_left = pipe_profile.get_stream(RS2_STREAM_INFRARED, 1); and rs2::stream_profile cam_right = pipe_profile.get_stream(RS2_STREAM_INFRARED, 2); retrieve the stream profiles of the two cameras: RS2_STREAM_INFRARED selects the infrared streams, and the indices 1 and 2 select the left and right infrared camera respectively.

rs2::stream_profile imu_stream = pipe_profile.get_stream(RS2_STREAM_GYRO); retrieves the gyroscope stream profile, which serves as the IMU stream here.

In short, this block starts a RealSense pipeline, obtains the required stream profiles (the two infrared cameras and the IMU), and prepares to receive and process their data, which is then fed to a visual SLAM system such as ORB_SLAM3 for visual-inertial localization and mapping.
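The imu_callback passed to pipe.start is not shown in this snippet. Below is a minimal sketch of what such a callback can look like; the buffer names (imu_mutex, v_gyro_data, v_accel_data and their timestamp vectors) are illustrative assumptions, and the actual ORB_SLAM3 example additionally receives the stereo framesets in the same callback and wakes the tracking loop through a condition variable.

#include <librealsense2/rs.hpp>
#include <mutex>
#include <vector>

std::mutex imu_mutex;
std::vector<rs2_vector> v_gyro_data, v_accel_data;
std::vector<double> v_gyro_timestamp, v_accel_timestamp;

auto imu_callback = [&](const rs2::frame& frame)
{
    std::lock_guard<std::mutex> lock(imu_mutex);

    if (rs2::motion_frame m = frame.as<rs2::motion_frame>())
    {
        rs2_vector data = m.get_motion_data();   // x, y, z sample
        double ts = m.get_timestamp();           // timestamp in milliseconds

        if (m.get_profile().stream_type() == RS2_STREAM_GYRO)
        {
            v_gyro_data.push_back(data);
            v_gyro_timestamp.push_back(ts);
        }
        else if (m.get_profile().stream_type() == RS2_STREAM_ACCEL)
        {
            v_accel_data.push_back(data);
            v_accel_timestamp.push_back(ts);
        }
    }
    // The real example also handles rs2::frameset here (the stereo images)
    // and notifies the main loop that a new image pair is available.
};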
Getting the extrinsics between the IMU and the left infrared camera:
float* Rbc = cam_left.get_extrinsics_to(imu_stream).rotation;
float* tbc = cam_left.get_extrinsics_to(imu_stream).translation;
Here, the get_extrinsics_to method returns the rotation and translation of the left infrared camera (cam_left) relative to the IMU (imu_stream). These values are stored in Rbc and tbc respectively.
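Note that get_extrinsics_to returns an rs2_extrinsics struct by value, so the raw pointers taken above (Rbc, tbc) actually point into a temporary that no longer exists after the statement. A slightly safer sketch keeps the whole struct in a named variable before reading its arrays:

// Keep the returned rs2_extrinsics alive, then read its arrays.
rs2_extrinsics ext_bc = cam_left.get_extrinsics_to(imu_stream);
const float* Rbc = ext_bc.rotation;     // 9 floats, 3x3 rotation
const float* tbc = ext_bc.translation;  // 3 floats, translation in meters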
Printing the extrinsics between the IMU and the left infrared camera:
std::cout << "Tbc (left) = " << std::endl;
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++)
std::cout << Rbc[i*3 + j] << ", ";
std::cout << tbc[i] << "\n";
}
This loop prints the rotation matrix and translation vector of the left infrared camera relative to the IMU: each output row contains one row of the 3x3 rotation matrix followed by the corresponding entry of the 3x1 translation vector.
Getting the extrinsics between the right and left infrared cameras:
float* Rlr = cam_right.get_extrinsics_to(cam_left).rotation;
float* tlr = cam_right.get_extrinsics_to(cam_left).translation;
Here, get_extrinsics_to returns the rotation and translation of the right infrared camera (cam_right) relative to the left infrared camera (cam_left). These values are stored in Rlr and tlr respectively.
Printing the extrinsics between the right and left infrared cameras:
std::cout << "Tlr = " << std::endl;
for(int i = 0; i<3; i++){
for(int j = 0; j<3; j++)
std::cout << Rlr[i*3 + j] << ", ";
std::cout << tlr[i] << "\n";
}
This loop prints the rotation matrix and translation vector of the right infrared camera relative to the left one, in the same row-by-row layout as above.

Overall, the purpose of this block is to obtain and print the extrinsics between the camera and the IMU and between the two infrared cameras; these parameters are essential for the subsequent sensor fusion and processing.
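In ORB_SLAM3 these extrinsics usually end up in the settings YAML as 4x4 homogeneous transforms (the exact key name, e.g. Tbc or IMU.T_b_c1, depends on the ORB_SLAM3 version). A small illustrative helper for assembling such a matrix from the printed values, indexing the rotation row-major exactly as the print loops above do:

#include <opencv2/core.hpp>

// Build a 4x4 homogeneous transform from a 3x3 rotation and a 3x1 translation.
// Illustrative helper, not part of the ORB_SLAM3 example code.
cv::Mat MakeHomogeneous(const float* R, const float* t)
{
    cv::Mat T = cv::Mat::eye(4, 4, CV_32F);
    for (int i = 0; i < 3; i++)
    {
        for (int j = 0; j < 3; j++)
            T.at<float>(i, j) = R[i*3 + j];
        T.at<float>(i, 3) = t[i];
    }
    return T;
}

// Usage:
// cv::Mat Tbc = MakeHomogeneous(Rbc, tbc);   // left IR camera -> IMU
// cv::Mat Tlr = MakeHomogeneous(Rlr, tlr);   // right IR camera -> left IR camera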
Intrinsic parameters are used for camera calibration in computer vision: they describe how 3D points in the camera frame project to 2D image coordinates (and vice versa). They comprise the focal lengths (fx and fy), the principal point (cx and cy, usually near the image center), the image height and width, and the radial and tangential distortion coefficients (here a 5-element array, coeffs).
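As a quick illustration of what these parameters mean, a minimal pinhole projection that ignores distortion is sketched below; librealsense itself provides rs2_project_point_to_pixel in librealsense2/rsutil.h, which additionally applies the distortion model stored in coeffs.

// Project a 3D point given in the camera frame (meters) to pixel coordinates
// using only the pinhole part of rs2_intrinsics; distortion is ignored here.
void ProjectPinhole(const rs2_intrinsics& intr, const float point[3], float pixel[2])
{
    pixel[0] = intr.fx * (point[0] / point[2]) + intr.ppx;   // u = fx * X/Z + cx
    pixel[1] = intr.fy * (point[1] / point[2]) + intr.ppy;   // v = fy * Y/Z + cy
}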
The code proceeds in the following steps:

Getting the intrinsics of the left infrared camera:
cam_left.as<rs2::video_stream_profile>().get_intrinsics() converts cam_left (a stream-profile object) to a video_stream_profile and calls get_intrinsics() to obtain the intrinsics, which are stored in the intrinsics_left variable.

Printing the intrinsics of the left camera:
std::cout prints the left camera's focal lengths (fx and fy), principal point (cx and cy), image height and width, and distortion coefficients (the values of the coeffs array).

Getting the intrinsics of the right infrared camera:
cam_right.as<rs2::video_stream_profile>().get_intrinsics() does the same for the right camera; the result is stored in the intrinsics_right variable.

Printing the intrinsics of the right camera:
std::cout prints the right camera's intrinsics in the same format.

The purpose of this block is to let the user inspect and understand the camera intrinsics, which are essential for later image processing, stereo matching, 3D reconstruction and similar tasks.
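When these values are passed on to other tools (OpenCV, a SLAM configuration file, etc.), they are typically rearranged into a 3x3 camera matrix and a distortion vector. A minimal sketch, assuming the 5 coefficients follow the usual Brown-Conrady ordering (k1, k2, p1, p2, k3); on the D435i the infrared streams are normally already rectified, so these coefficients are often all zero.

#include <librealsense2/rs.hpp>
#include <opencv2/core.hpp>

// Pack rs2_intrinsics into the 3x3 camera matrix K and a 1x5 distortion vector
// as commonly used by OpenCV. Illustrative helper, not part of the example code.
void ToOpenCV(const rs2_intrinsics& intr, cv::Mat& K, cv::Mat& dist)
{
    K = (cv::Mat_<double>(3, 3) << intr.fx, 0.0,     intr.ppx,
                                   0.0,     intr.fy, intr.ppy,
                                   0.0,     0.0,     1.0);
    dist = (cv::Mat_<double>(1, 5) << intr.coeffs[0], intr.coeffs[1],
                                      intr.coeffs[2], intr.coeffs[3],
                                      intr.coeffs[4]);
}

The stereo baseline that ORB_SLAM3 additionally needs for a stereo configuration can be read off the Tlr translation printed earlier (its magnitude, in meters).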