OpenCV Stereo Vision Triangulation in C++

In a stereo vision system, triangulation is a geometry-based 3D reconstruction technique: by analysing two 2D images of the same scene taken from different viewpoints, the 3D coordinates of an object can be recovered. The core idea is to use the camera intrinsics and the relative pose between the two cameras (described by a rotation matrix and a translation vector) to convert pixel coordinates into rays in 3D space, and then to determine the object's position by intersecting those rays.

The principle of triangulation:

1. Camera model: each camera is described by an intrinsic matrix (K) and an extrinsic matrix (composed of a rotation matrix R and a translation vector T). The intrinsic matrix contains the focal lengths and the principal point, while the extrinsic matrix describes the camera's position and orientation in the world coordinate system.

2. From image point to ray: given a point in the image, the intrinsic matrix maps it to a ray in the camera coordinate system. The homogeneous pixel coordinates are transformed by the inverse of the intrinsic matrix.

3. Ray intersection: in a stereo setup, the two rays corresponding to the same object point should, in theory, intersect at a single point in 3D space, which is the true position of the object. The intersection is found by solving a linear system or a least-squares problem; the equations are summarised right below.
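
To make items 2 and 3 concrete: write \hat{x} = K^{-1}(u, v, 1)^\top for the normalized (camera-frame) coordinates of a pixel, let P and P_1 be the two 3\times 4 camera matrices, and let p_i^\top denote the i-th row of such a matrix. Each observation then satisfies

\lambda\,\hat{x} = P\,X, \qquad X = (X, Y, Z, 1)^\top ,

and eliminating the unknown depth \lambda leaves two linear equations per view:

\hat{u}\,(p_3^\top X) - p_1^\top X = 0, \qquad \hat{v}\,(p_3^\top X) - p_2^\top X = 0 .

Stacking the four equations from the two views gives an over-determined system in (X, Y, Z), which is solved in the least-squares sense; this is exactly the 4\times 3 system A\,(X, Y, Z)^\top = B assembled in the LinearLSTriangulation function later in this article.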

The solution procedure:

1. Preprocessing: the pixel coordinates from each image are first converted to homogeneous coordinates in the camera frame using the inverse of the intrinsic matrix.

2. Building the ray equations: using each camera's projection matrix (the product of its intrinsic and extrinsic matrices), the equations of the two rays are formed. They are usually expressed by a direction vector and a point on the ray.

3. Triangulation: the 3D coordinates of the object are obtained by intersecting the two rays, i.e. by solving the linear system formed from the two projection matrices and the corresponding image points.

4. Optimization: because of noise and measurement error, the two rays generally do not intersect exactly, so a direct solve may not yield a precise intersection. A least-squares solution is therefore used, minimising the discrepancy between the observed image points and the reprojections of the estimated 3D point.

5. Output: finally, the computed 3D coordinates are output; they give the object's position in the reference (here, the left-camera) coordinate system. A minimal end-to-end sketch using cv::triangulatePoints follows this list.
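
Before the two complete programs, the following is a minimal, self-contained sketch of steps 1–5 built around cv::triangulatePoints. The intrinsics, stereo pose and pixel coordinates in it are rounded placeholder values for illustration only; they are not the calibration results used in the listings below.

// Minimal triangulation sketch (placeholder calibration values)
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
	// placeholder intrinsics of the left and right cameras
	cv::Mat K1 = (cv::Mat_<double>(3, 3) << 2450, 0, 1190, 0, 2450, 968, 0, 0, 1);
	cv::Mat K2 = (cv::Mat_<double>(3, 3) << 2450, 0, 1202, 0, 2455, 1023, 0, 0, 1);
	// placeholder pose of the right camera relative to the left (translation in mm)
	cv::Mat R = cv::Mat::eye(3, 3, CV_64F);
	cv::Mat T = (cv::Mat_<double>(3, 1) << -310.0, 3.5, -2.4);

	// projection matrices: P1 = K1[I|0], P2 = K2[R|T]
	cv::Mat Rt1 = cv::Mat::eye(3, 4, CV_64F);
	cv::Mat Rt2(3, 4, CV_64F);
	R.copyTo(Rt2(cv::Range(0, 3), cv::Range(0, 3)));
	T.copyTo(Rt2.col(3));
	cv::Mat P1, P2;
	cv::Mat(K1 * Rt1).convertTo(P1, CV_32F);// use CV_32F data throughout, as the article's reconstruct() does
	cv::Mat(K2 * Rt2).convertTo(P2, CV_32F);

	// one pixel correspondence in the left and right image (placeholder values)
	std::vector<cv::Point2f> ptsL{ cv::Point2f(1073.4f, 1036.9f) };
	std::vector<cv::Point2f> ptsR{ cv::Point2f(493.3f, 1136.4f) };

	// triangulate: the result is a 4xN matrix of homogeneous points
	cv::Mat X4;
	cv::triangulatePoints(P1, P2, ptsL, ptsR, X4);

	// de-homogenise by dividing by the fourth coordinate
	cv::Mat X = X4.col(0) / X4.at<float>(3, 0);
	std::cout << "3D point in the left-camera frame: ("
		<< X.at<float>(0) << ", " << X.at<float>(1) << ", " << X.at<float>(2) << ")" << std::endl;
	return 0;
}

With real calibration data, the same call produces the 4x1 homogeneous column that the reconstruct() function in the listings below de-homogenises in the same way.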

In code, the core triangulation step can be carried out with OpenCV's `triangulatePoints` function, while the `LinearLSTriangulation` function solves the linear system directly to find the intersection of the two rays and hence the object's 3D coordinates. The full code is as follows:

(1) Reading the images and obtaining the corner pixel coordinates:
// Triangulation: detect chessboard corners and compute their 3D coordinates
#include<opencv2/calib3d/calib3d.hpp>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>

#include <opencv2/opencv.hpp>

#include<iostream>
#include <opencv2/core/traits.hpp>
#include "opencv2/features2d.hpp"
#include <opencv2/features2d/features2d.hpp>
#include <chrono>
#include <math.h>
#include <fstream>
#include <sstream>
#include <opencv2/core/utils/logger.hpp> // used to silence OpenCV log output

using namespace std;
using namespace cv;

Mat_<double> LinearLSTriangulation(
	Point3d u,   // homogeneous image point (u,v,1)
	Matx34d P,   // camera 1 matrix
	Point3d u1,  // homogeneous image point in the 2nd camera
	Matx34d P1   // camera 2 matrix
);
void reconstruct(Mat& K1, Mat& K2, Mat& R, Mat& T, vector<Point2f>& p1, vector<Point2f>& p2, Mat& structure);
Point2f pixel2cam(const Point2d& p, const Mat& K);




int main()
{
	cv::utils::logging::setLogLevel(utils::logging::LOG_LEVEL_SILENT);// suppress all OpenCV log output

	// or:
	//utils::logging::setLogLevel(utils::logging::LOG_LEVEL_ERROR);// output error logs only

	Size image_size;
	Size board_size = Size(11, 8);// number of inner chessboard (calibration target) corners per row and column
	vector<Point2f> image_corners1;// corners detected in the left image
	vector<Point2f> image_corners2;// corners detected in the right image
	vector<vector<Point2f>> corners_Seq;// all detected corners (declared but not used below)
	vector<Point2f> p1, p2, p3, p4;

	Mat K1(Matx33d(
		2.4454469774229988e+03, 0, 1.1904497601237404e+03, 0,
		2.4501877368468731e+03, 9.6797955993264543e+02, 0, 0, 1));// left camera intrinsic matrix
	Mat K2(Matx33d(
		2.4495657252877713e+03, 0, 1.2021880540540637e+03, 0,
		2.4547868050821821e+03, 1.0231012864078014e+03, 0, 0, 1));// right camera intrinsic matrix

	Mat Kinv1;
	Mat Kinv2;
	invert(K1, Kinv1, DECOMP_LU);
	invert(K2, Kinv2, DECOMP_LU);

	Mat R = (Mat_<double>(3, 3) << 9.9930463873988629e-01, 1.1239278405046448e-02,
		-3.5551619005033626e-02, -1.0815154950411883e-02,
		9.9986830622891665e-01, 1.2099695133177138e-02,
		3.5682928920476704e-02, -1.1706785205644879e-02,
		9.9929459108103103e-01);
	Mat T = (Mat_<double>(3, 1) << -3.1067452891668921e+02, 3.5113614087664948e+00,
		-2.3577131931354245e+00);

	Mat structure1, structure2;


	/********************* Read the images and detect the chessboard corners *********************/

	string imageFileName1, imageFileName2;


	imageFileName1 = "l1.bmp";
	imageFileName2 = "r1.bmp";

	cout << imageFileName1 << endl;
	cout << imageFileName2 << endl;
	Mat image1 = imread(imageFileName1);
	Mat image2 = imread(imageFileName2);
	image_size = image1.size();
	Mat image_gray1, image_gray2;
	Mat dstImage1, dstImage2;
	cvtColor(image1, image_gray1, cv::COLOR_BGR2GRAY);
	cvtColor(image2, image_gray2, cv::COLOR_BGR2GRAY);


	// detect the inner corners of the chessboard calibration target
	bool patternfound1 = findChessboardCorners(image1, board_size, image_corners1, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE);
	bool patternfound2 = findChessboardCorners(image2, board_size, image_corners2, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE);
	//+ CALIB_CB_FAST_CHECK);
	if (!patternfound1 || !patternfound2)// abort if the chessboard was not found in either image
	{
		cout << "Cannot find chessboard corners!" << endl;
		return -1;
	}
	else
	{
		/* refine the detected corners to sub-pixel accuracy */
		cornerSubPix(image_gray1, image_corners1, Size(11, 11), Size(-1, -1), TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.1));
		cornerSubPix(image_gray2, image_corners2, Size(11, 11), Size(-1, -1), TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.1));

		Mat imageTemp1 = image1.clone();
		Mat imageTemp2 = image2.clone();
		for (int j1 = 0; j1 < image_corners1.size(); j1++)
		{
			circle(imageTemp1, image_corners1[j1], 10, Scalar(0, 0, 255), 2, 8, 0);
		}
		for (int j2 = 0; j2 < image_corners2.size(); j2++)
		{
			circle(imageTemp2, image_corners2[j2], 10, Scalar(0, 255, 0), 2, 8, 0);
		}
		namedWindow("All corners - left image", WINDOW_NORMAL);
		imshow("All corners - left image", imageTemp1);
		namedWindow("All corners - right image", WINDOW_NORMAL);
		imshow("All corners - right image", imageTemp2);

		int n = 2;
		int m = 3;// indices of the two corners selected for the measurement

		Point2f k1, k2, k3, k4;
		k1 = image_corners1[n];
		k2 = image_corners1[m];
		k3 = image_corners2[n];
		k4 = image_corners2[m];


		circle(image1, k1, 4, Scalar(255, 0, 255), 2, 8, 0);
		circle(image1, k2, 4, Scalar(255, 255, 0), 2, 8, 0);
		circle(image2, k3, 4, Scalar(255, 0, 255), 2, 8, 0);
		circle(image2, k4, 4, Scalar(255, 255, 0), 2, 8, 0);
		namedWindow("Selected corners - left image", WINDOW_NORMAL);
		imshow("Selected corners - left image", image1);
		namedWindow("Selected corners - right image", WINDOW_NORMAL);
		imshow("Selected corners - right image", image2);
		cout << "Left image: point 1 = (" << k1.x << "," << k1.y << "), point 2 = (" << k2.x << "," << k2.y << ")" << endl;
		cout << "Right image: point 1 = (" << k3.x << "," << k3.y << "), point 2 = (" << k4.x << "," << k4.y << ")" << endl;

		// first selected point
		p1.push_back(k1);
		p3.push_back(k3);
		reconstruct(K1, K2, R, T, p1, p3, structure1);
		float a1 = structure1.at<float>(0, 0) / structure1.at<float>(3, 0);
		float b1 = structure1.at<float>(1, 0) / structure1.at<float>(3, 0);
		float c1 = structure1.at<float>(2, 0) / structure1.at<float>(3, 0);

		Point3d points1;
		points1.x = a1;
		points1.y = b1;
		points1.z = c1;

		Point3d u1(k1.x, k1.y, 1); Point3d u2(k3.x, k3.y, 1);
		Mat_<double> um_1 = Kinv1 * Mat_<double>(u1);
		u1 = Point3d(um_1);
		Mat_<double> um_2 = Kinv2 * Mat_<double>(u2);
		u2 = Point3d(um_2);

		Matx34d P(1, 0, 0, 0,
			0, 1, 0, 0,
			0, 0, 1, 0);


		Matx34d P1(9.9930463873988629e-01, 1.1239278405046448e-02, -3.5551619005033626e-02, -3.1067452891668921e+02,
			-1.0815154950411883e-02,9.9986830622891665e-01, 1.2099695133177138e-02, 3.5113614087664948e+00,
			3.5682928920476704e-02, -1.1706785205644879e-02, 9.9929459108103103e-01, -2.3577131931354245e+00);// pose of the second camera: [R|T] from the stereo calibration (projects 5 and 7, calibration image 3 from 0125)

		Mat_<double> X_1 = LinearLSTriangulation(u1, P, u2, P1);
		double er1 = sqrt((a1 - X_1(0)) * (a1 - X_1(0)) + (b1 - X_1(1)) * (b1 - X_1(1)) + (c1 - X_1(2)) * (c1 - X_1(2)));


		Point2d pt1_cam_1 = pixel2cam(k1, K1);
		Point2d pt1_cam_3d_1(
			points1.x / points1.z,
			points1.y / points1.z
		);
		Point2f pt2_cam_1 = pixel2cam(k3, K2);
		Mat pt2_trans_1 = R * (Mat_<double>(3, 1) << points1.x, points1.y, points1.z) + T;
		pt2_trans_1 /= pt2_trans_1.at<double>(2, 0);

		double n_x1 = K2.at<double>(0, 0) * pt2_trans_1.at<double>(0, 0) + K2.at<double>(0, 2);
		double n_y1 = K2.at<double>(1, 1) * pt2_trans_1.at<double>(1, 0) + K2.at<double>(1, 2);

		double ddd_p1 = sqrt((n_x1 - k3.x) * (n_x1 - k3.x) + (n_y1 - k3.y) * (n_y1 - k3.y));


		// second selected point
		p2.push_back(k2);
		p4.push_back(k4);
		reconstruct(K1, K2, R, T, p2, p4, structure2);
		float a2 = structure2.at<float>(0, 0) / structure2.at<float>(3, 0);
		float b2 = structure2.at<float>(1, 0) / structure2.at<float>(3, 0);
		float c2 = structure2.at<float>(2, 0) / structure2.at<float>(3, 0);

		Point3d points2;
		points2.x = a2;
		points2.y = b2;
		points2.z = c2;

		Point3d uu1(k2.x, k2.y, 1); Point3d uu2(k4.x, k4.y, 1);
		Mat_<double> uum_1 = Kinv1 * Mat_<double>(uu1);
		uu1 = Point3d(uum_1);
		Mat_<double> uum_2 = Kinv2 * Mat_<double>(uu2);
		uu2 = Point3d(uum_2);



		Mat_<double> X_2 = LinearLSTriangulation(uu1, P, uu2, P1);
		double er2 = sqrt((a2 - X_2(0)) * (a2 - X_2(0)) + (b2 - X_2(1)) * (b2 - X_2(1)) + (c2 - X_2(2)) * (c2 - X_2(2)));


		Point2d pt1_cam_2 = pixel2cam(k2, K1);
		Point2d pt1_cam_3d_2(
			points2.x / points2.z,
			points2.y / points2.z
		);
		Point2f pt2_cam_2 = pixel2cam(k4, K2);
		Mat pt2_trans_2 = R * (Mat_<double>(3, 1) << points2.x, points2.y, points2.z) + T;
		pt2_trans_2 /= pt2_trans_2.at<double>(2, 0);

		double n_x2 = K2.at<double>(0, 0) * pt2_trans_2.at<double>(0, 0) + K2.at<double>(0, 2);
		double n_y2 = K2.at<double>(1, 1) * pt2_trans_2.at<double>(1, 0) + K2.at<double>(1, 2);

		double ddd_p2 = sqrt((n_x2 - k4.x) * (n_x2 - k4.x) + (n_y2 - k4.y) * (n_y2 - k4.y));


		// distance between the two points
		double d1 = sqrt((a1 - a2) * (a1 - a2) + (b1 - b2) * (b1 - b2) + (c1 - c2) * (c1 - c2));
		double d2 = sqrt((X_1(0) - X_2(0)) * (X_1(0) - X_2(0)) + (X_1(1) - X_2(1)) * (X_1(1) - X_2(1)) + (X_1(2) - X_2(2)) * (X_1(2) - X_2(2)));



		cout << "Point 1, 3D coordinates from cv::triangulatePoints:" << endl;
		cout << "(" << a1 << "," << b1 << "," << c1 << ")" << endl;
		cout << "Point 1, 3D coordinates from LinearLSTriangulation:" << endl;
		cout << "(" << X_1(0) << "," << X_1(1) << "," << X_1(2) << ")" << endl;
		cout << "Distance between the two estimates: " << er1 << "mm" << endl;

		cout << endl << "Point 2, 3D coordinates from cv::triangulatePoints:" << endl;
		cout << "(" << a2 << "," << b2 << "," << c2 << ")" << endl;
		cout << "Point 2, 3D coordinates from LinearLSTriangulation:" << endl;
		cout << "(" << X_2(0) << "," << X_2(1) << "," << X_2(2) << ")" << endl;
		cout << "Distance between the two estimates: " << er2 << "mm" << endl;


		cout << endl << "Distance between point 1 and point 2 (cv::triangulatePoints): " << d1 << "mm" << endl;
		cout << "Distance between point 1 and point 2 (LinearLSTriangulation): " << d2 << "mm" << endl;

	}

	waitKey(0);
	return 0;
}


Point2f pixel2cam(const Point2d& p, const Mat& K)
{
	return Point2f
	(
		(p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
		(p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
	);
}

Mat_<double> LinearLSTriangulation(
	Point3d u,   // homogeneous image point (u,v,1)
	Matx34d P,   // camera 1 matrix
	Point3d u1,  // homogeneous image point in the 2nd camera
	Matx34d P1   // camera 2 matrix
)
{
	//build A matrix  
	Matx43d A(u.x * P(2, 0) - P(0, 0), u.x * P(2, 1) - P(0, 1), u.x * P(2, 2) - P(0, 2),
		u.y * P(2, 0) - P(1, 0), u.y * P(2, 1) - P(1, 1), u.y * P(2, 2) - P(1, 2),
		u1.x * P1(2, 0) - P1(0, 0), u1.x * P1(2, 1) - P1(0, 1), u1.x * P1(2, 2) - P1(0, 2),
		u1.y * P1(2, 0) - P1(1, 0), u1.y * P1(2, 1) - P1(1, 1), u1.y * P1(2, 2) - P1(1, 2)
	);
	//build B vector  
	Matx41d B(-(u.x * P(2, 3) - P(0, 3)),
		-(u.y * P(2, 3) - P(1, 3)),
		-(u1.x * P1(2, 3) - P1(0, 3)),
		-(u1.y * P1(2, 3) - P1(1, 3)));
	//solve for X  
	Mat_<double> X;
	solve(A, B, X, DECOMP_SVD);
	return X;
}

void reconstruct(Mat& K1, Mat& K2, Mat& R, Mat& T, vector<Point2f>& p1, vector<Point2f>& p2, Mat& structure)
{
	Mat proj1(3, 4, CV_32FC1);
	Mat proj2(3, 4, CV_32FC1);

	proj1(Range(0, 3), Range(0, 3)) = Mat::eye(3, 3, CV_32FC1);
	proj1.col(3) = Mat::zeros(3, 1, CV_32FC1);
	cout << "[R|T] matrix of camera 1: " << proj1 << endl;
	R.convertTo(proj2(Range(0, 3), Range(0, 3)), CV_32FC1);
	T.convertTo(proj2.col(3), CV_32FC1);
	cout << "[R|T] matrix of camera 2: " << proj2 << endl;
	Mat fK1, fK2;
	K1.convertTo(fK1, CV_32FC1);
	K2.convertTo(fK2, CV_32FC1);
	proj1 = fK1 * proj1;
	proj2 = fK2 * proj2;
	cout << endl;


	triangulatePoints(proj1, proj2, p1, p2, structure);
}

The run results are as follows:

Note: the chessboard is used here to verify the accuracy of the triangulation result (the chessboard square size is 50 mm; corners number 2 and 3 are selected, with corner numbering starting from 0). Since corners 2 and 3 are adjacent corners in the same row, the distance between the two reconstructed points should come out close to 50 mm.

(2) Solving for the 3D coordinates directly from known pixel coordinates:
// Triangulation: compute 3D coordinates directly from known pixel coordinates
#include<opencv2/calib3d/calib3d.hpp>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>

#include <opencv2/opencv.hpp>

#include<iostream>
#include <opencv2/core/traits.hpp>
#include "opencv2/features2d.hpp"
#include <opencv2/features2d/features2d.hpp>
#include <chrono>
#include <math.h>
#include <fstream>
#include <sstream>

using namespace std;
using namespace cv;

Mat_<double> LinearLSTriangulation(
	Point3d u,   // homogeneous image point (u,v,1)
	Matx34d P,   // camera 1 matrix
	Point3d u1,  // homogeneous image point in the 2nd camera
	Matx34d P1   // camera 2 matrix
);
void reconstruct(Mat& K1, Mat& K2, Mat& R, Mat& T, vector<Point2f>& p1, vector<Point2f>& p2, Mat& structure);
Point2f pixel2cam(const Point2d& p, const Mat& K);

int main()
{
	vector<Point2f> p1, p2, p3, p4;

	Mat K1(Matx33d(
		2.4454469774229988e+03, 0, 1.1904497601237404e+03, 0,
		2.4501877368468731e+03, 9.6797955993264543e+02, 0, 0, 1));// left camera intrinsic matrix
	Mat K2(Matx33d(
		2.4495657252877713e+03, 0, 1.2021880540540637e+03, 0,
		2.4547868050821821e+03, 1.0231012864078014e+03, 0, 0, 1));// right camera intrinsic matrix

	Mat Kinv1;
	Mat Kinv2;
	invert(K1, Kinv1, DECOMP_LU);
	invert(K2, Kinv2, DECOMP_LU);

	Mat R = (Mat_<double>(3, 3) << 9.9930463873988629e-01, 1.1239278405046448e-02,
		-3.5551619005033626e-02, -1.0815154950411883e-02,
		9.9986830622891665e-01, 1.2099695133177138e-02,
		3.5682928920476704e-02, -1.1706785205644879e-02,
		9.9929459108103103e-01);
	Mat T = (Mat_<double>(3, 1) << -3.1067452891668921e+02, 3.5113614087664948e+00,
		-2.3577131931354245e+00);

	Mat structure1, structure2;
	cv::Point2f k1(1073.45f, 1036.86f);
	cv::Point2f k2(1147.08f, 1037.43f);
	cv::Point2f k3(493.27f, 1136.36f);
	cv::Point2f k4(568.306f, 1136.03f);

	cout << "Left image: point 1 = (" << k1.x << "," << k1.y << "), point 2 = (" << k2.x << "," << k2.y << ")" << endl;
	cout << "Right image: point 1 = (" << k3.x << "," << k3.y << "), point 2 = (" << k4.x << "," << k4.y << ")" << endl;

	// first selected point
	p1.push_back(k1);
	p3.push_back(k3);
	reconstruct(K1, K2, R, T, p1, p3, structure1);
	float a1 = structure1.at<float>(0, 0) / structure1.at<float>(3, 0);
	float b1 = structure1.at<float>(1, 0) / structure1.at<float>(3, 0);
	float c1 = structure1.at<float>(2, 0) / structure1.at<float>(3, 0);

	Point3d points1;
	points1.x = a1;
	points1.y = b1;
	points1.z = c1;

	Point3d u1(k1.x, k1.y, 1); Point3d u2(k3.x, k3.y, 1);
	Mat_<double> um_1 = Kinv1 * Mat_<double>(u1);
	u1 = Point3d(um_1);
	Mat_<double> um_2 = Kinv2 * Mat_<double>(u2);
	u2 = Point3d(um_2);

	Matx34d P(1, 0, 0, 0,
		0, 1, 0, 0,
		0, 0, 1, 0);


	Matx34d P1(9.9930463873988629e-01, 1.1239278405046448e-02, -3.5551619005033626e-02, -3.1067452891668921e+02,
		-1.0815154950411883e-02, 9.9986830622891665e-01, 1.2099695133177138e-02, 3.5113614087664948e+00,
		3.5682928920476704e-02, -1.1706785205644879e-02, 9.9929459108103103e-01, -2.3577131931354245e+00);// pose of the second camera: [R|T] from the stereo calibration (projects 5 and 7, calibration image 3 from 0125)


	Mat_<double> X_1 = LinearLSTriangulation(u1, P, u2, P1);
	double er1 = sqrt((a1 - X_1(0)) * (a1 - X_1(0)) + (b1 - X_1(1)) * (b1 - X_1(1)) + (c1 - X_1(2)) * (c1 - X_1(2)));


	Point2d pt1_cam_1 = pixel2cam(k1, K1);
	Point2d pt1_cam_3d_1(
		points1.x / points1.z,
		points1.y / points1.z
	);
	Point2f pt2_cam_1 = pixel2cam(k3, K2);
	Mat pt2_trans_1 = R * (Mat_<double>(3, 1) << points1.x, points1.y, points1.z) + T;
	pt2_trans_1 /= pt2_trans_1.at<double>(2, 0);

	double n_x1 = K2.at<double>(0, 0) * pt2_trans_1.at<double>(0, 0) + K2.at<double>(0, 2);
	double n_y1 = K2.at<double>(1, 1) * pt2_trans_1.at<double>(1, 0) + K2.at<double>(1, 2);

	double ddd_p1 = sqrt((n_x1 - k3.x) * (n_x1 - k3.x) + (n_y1 - k3.y) * (n_y1 - k3.y));


	// second selected point
	p2.push_back(k2);
	p4.push_back(k4);
	reconstruct(K1, K2, R, T, p2, p4, structure2);
	float a2 = structure2.at<float>(0, 0) / structure2.at<float>(3, 0);
	float b2 = structure2.at<float>(1, 0) / structure2.at<float>(3, 0);
	float c2 = structure2.at<float>(2, 0) / structure2.at<float>(3, 0);

	Point3d points2;
	points2.x = a2;
	points2.y = b2;
	points2.z = c2;

	Point3d uu1(k2.x, k2.y, 1); Point3d uu2(k4.x, k4.y, 1);
	Mat_<double> uum_1 = Kinv1 * Mat_<double>(uu1);
	uu1 = Point3d(uum_1);
	Mat_<double> uum_2 = Kinv2 * Mat_<double>(uu2);
	uu2 = Point3d(uum_2);



	Mat_<double> X_2 = LinearLSTriangulation(uu1, P, uu2, P1);
	double er2 = sqrt((a2 - X_2(0)) * (a2 - X_2(0)) + (b2 - X_2(1)) * (b2 - X_2(1)) + (c2 - X_2(2)) * (c2 - X_2(2)));


	Point2d pt1_cam_2 = pixel2cam(k2, K1);
	Point2d pt1_cam_3d_2(
		points2.x / points2.z,
		points2.y / points2.z
	);
	Point2f pt2_cam_2 = pixel2cam(k4, K2);
	Mat pt2_trans_2 = R * (Mat_<double>(3, 1) << points2.x, points2.y, points2.z) + T;
	pt2_trans_2 /= pt2_trans_2.at<double>(2, 0);

	double n_x2 = K2.at<double>(0, 0) * pt2_trans_2.at<double>(0, 0) + K2.at<double>(0, 2);
	double n_y2 = K2.at<double>(1, 1) * pt2_trans_2.at<double>(1, 0) + K2.at<double>(1, 2);

	double ddd_p2 = sqrt((n_x2 - k4.x) * (n_x2 - k4.x) + (n_y2 - k4.y) * (n_y2 - k4.y));


	// distance between the two points
	double d1 = sqrt((a1 - a2) * (a1 - a2) + (b1 - b2) * (b1 - b2) + (c1 - c2) * (c1 - c2));
	double d2 = sqrt((X_1(0) - X_2(0)) * (X_1(0) - X_2(0)) + (X_1(1) - X_2(1)) * (X_1(1) - X_2(1)) + (X_1(2) - X_2(2)) * (X_1(2) - X_2(2)));



	cout << "Point 1, 3D coordinates from cv::triangulatePoints:" << endl;
	cout << "(" << a1 << "," << b1 << "," << c1 << ")" << endl;
	cout << "Point 1, 3D coordinates from LinearLSTriangulation:" << endl;
	cout << "(" << X_1(0) << "," << X_1(1) << "," << X_1(2) << ")" << endl;
	cout << "Distance between the two estimates: " << er1 << "mm" << endl;

	cout << endl << "Point 2, 3D coordinates from cv::triangulatePoints:" << endl;
	cout << "(" << a2 << "," << b2 << "," << c2 << ")" << endl;
	cout << "Point 2, 3D coordinates from LinearLSTriangulation:" << endl;
	cout << "(" << X_2(0) << "," << X_2(1) << "," << X_2(2) << ")" << endl;
	cout << "Distance between the two estimates: " << er2 << "mm" << endl;


	cout << endl << "Distance between point 1 and point 2 (cv::triangulatePoints): " << d1 << "mm" << endl;
	cout << "Distance between point 1 and point 2 (LinearLSTriangulation): " << d2 << "mm" << endl;

	return 0;
}


Point2f pixel2cam(const Point2d& p, const Mat& K)
{
	return Point2f
	(
		(p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
		(p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
	);
}

Mat_<double> LinearLSTriangulation(
	Point3d u,   // homogeneous image point (u,v,1)
	Matx34d P,   // camera 1 matrix
	Point3d u1,  // homogeneous image point in the 2nd camera
	Matx34d P1   // camera 2 matrix
)
{
	//build A matrix  
	Matx43d A(u.x * P(2, 0) - P(0, 0), u.x * P(2, 1) - P(0, 1), u.x * P(2, 2) - P(0, 2),
		u.y * P(2, 0) - P(1, 0), u.y * P(2, 1) - P(1, 1), u.y * P(2, 2) - P(1, 2),
		u1.x * P1(2, 0) - P1(0, 0), u1.x * P1(2, 1) - P1(0, 1), u1.x * P1(2, 2) - P1(0, 2),
		u1.y * P1(2, 0) - P1(1, 0), u1.y * P1(2, 1) - P1(1, 1), u1.y * P1(2, 2) - P1(1, 2)
	);
	//build B vector  
	Matx41d B(-(u.x * P(2, 3) - P(0, 3)),
		-(u.y * P(2, 3) - P(1, 3)),
		-(u1.x * P1(2, 3) - P1(0, 3)),
		-(u1.y * P1(2, 3) - P1(1, 3)));
	//solve for X  
	Mat_<double> X;
	solve(A, B, X, DECOMP_SVD);
	return X;
}

void reconstruct(Mat& K1, Mat& K2, Mat& R, Mat& T, vector<Point2f>& p1, vector<Point2f>& p2, Mat& structure)
{
	Mat proj1(3, 4, CV_32FC1);
	Mat proj2(3, 4, CV_32FC1);

	proj1(Range(0, 3), Range(0, 3)) = Mat::eye(3, 3, CV_32FC1);
	proj1.col(3) = Mat::zeros(3, 1, CV_32FC1);
	cout << "[R|T] matrix of camera 1: " << proj1 << endl;
	R.convertTo(proj2(Range(0, 3), Range(0, 3)), CV_32FC1);
	T.convertTo(proj2.col(3), CV_32FC1);
	cout << "[R|T] matrix of camera 2: " << proj2 << endl;
	Mat fK1, fK2;
	K1.convertTo(fK1, CV_32FC1);
	K2.convertTo(fK2, CV_32FC1);
	proj1 = fK1 * proj1;
	proj2 = fK2 * proj2;
	cout << endl;


	triangulatePoints(proj1, proj2, p1, p2, structure);
}

The run results are as follows:
