我有 cameraMatrix
和 distCoeff
需要消除图像或点 vector 的失真。现在我想把它们扭曲回来。
Opencv 可以吗?
我记得我在 stackoverflow 上读到了一些关于它的内容,但现在找不到了。
编辑:我在这个 answer 中找到了实现方法,OpenCV 开发者专区的这个 issue 中也有相关讨论。
但我的结果并不正确,大约有 2-4 个像素的误差。我的代码中可能有问题,因为我链接的那个答案在单元测试中一切正常。可能是 float 到 double 的类型转换问题,或者是其他我没有注意到的地方。
这是我的测试用例:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
// Applies the OpenCV/Bouguet lens-distortion model to a set of NORMALIZED
// (ideal) image points and maps them back to pixel coordinates.
//
// src              : points in normalized camera coordinates
//                    (e.g. the output of cv::undistortPoints)
// dst              : output distorted points in pixel coordinates (cleared first)
// cameraMatrix     : 3x3 CV_64F intrinsics [fx 0 ux; 0 fy uy; 0 0 1]
// distorsionMatrix : 5-element CV_64F coefficients (k1, k2, p1, p2, k3).
//                    NOTE(review): at<double>(0, i) is used for the i-th
//                    coefficient; for a continuous 1x5 or 5x1 Mat the element
//                    stride equals sizeof(double), so both layouts read correctly.
void distortPoints(const std::vector<cv::Point2d> & src, std::vector<cv::Point2d> & dst,
const cv::Mat & cameraMatrix, const cv::Mat & distorsionMatrix)
{
    dst.clear();
    dst.reserve(src.size());

    const double fx = cameraMatrix.at<double>(0, 0);
    const double fy = cameraMatrix.at<double>(1, 1);
    const double ux = cameraMatrix.at<double>(0, 2);
    const double uy = cameraMatrix.at<double>(1, 2);

    const double k1 = distorsionMatrix.at<double>(0, 0);
    const double k2 = distorsionMatrix.at<double>(0, 1);
    const double p1 = distorsionMatrix.at<double>(0, 2);
    const double p2 = distorsionMatrix.at<double>(0, 3);
    const double k3 = distorsionMatrix.at<double>(0, 4);

    for (const cv::Point2d & p : src)
    {
        const double x = p.x;
        const double y = p.y;
        const double r2 = x * x + y * y;

        // Step 1 : apply distortion in normalized coordinates.
        // Radial term is 1 + k1*r^2 + k2*r^4 + k3*r^6.
        // BUG FIX: the k3 term must be r^6 (r2*r2*r2); the original code
        // used r^4 (r2*r2), which is wrong whenever k3 != 0.
        const double radial = 1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2;
        double xDistorted = x * radial;
        double yDistorted = y * radial;

        // Tangential distortion (Bouguet / OpenCV formulation — the
        // "Learning OpenCV" book's equations are known to be wrong here).
        xDistorted += 2. * p1 * x * y + p2 * (r2 + 2. * x * x);
        yDistorted += p1 * (r2 + 2. * y * y) + 2. * p2 * x * y;

        // Step 2 : normalized (ideal) coordinates -> pixel coordinates.
        dst.push_back(cv::Point2d(xDistorted * fx + ux, yDistorted * fy + uy));
    }
}
// Test harness: undistort a set of pixel points with cv::undistortPoints,
// re-distort them with distortPoints(), and print the per-point round-trip
// error (which should ideally be ~0 but comes out at 2-4 pixels).
int main(int /*argc*/, char** /*argv*/) {
cout << "OpenCV version: " << CV_MAJOR_VERSION << " " << CV_MINOR_VERSION << endl; // 2 4
// Intrinsics: fx = fy = 1600, principal point (789, 650).
Mat cameraMatrix = (Mat_<double>(3,3) << 1600, 0, 789, 0, 1600, 650, 0, 0, 1);
// Only k1 is non-zero (strong barrel distortion); k2, p1, p2, k3 are 0.
Mat distorsion = (Mat_<double>(5,1) << -0.48, 0, 0, 0, 0);
cout << "camera matrix: " << cameraMatrix << endl;
cout << "distorsion coefficent: " << distorsion << endl;
// the starting points
// NOTE(review): Point2f stores coordinates as float — a likely source of
// the round-trip error asked about below, since everything downstream is double.
std::vector<Point2f> original_pts;
original_pts.push_back( Point2f(23, 358) );
original_pts.push_back( Point2f(8, 357) );
original_pts.push_back( Point2f(12, 342) );
original_pts.push_back( Point2f(27, 343) );
original_pts.push_back( Point2f(7, 350) );
original_pts.push_back( Point2f(-8, 349) );
original_pts.push_back( Point2f(-4, 333) );
original_pts.push_back( Point2f(12, 334) );
Mat original_m = Mat(original_pts);
// undistort
// With no R / P arguments, undistortPoints returns NORMALIZED (ideal)
// coordinates, stored as a 2-channel float Mat (input was Point2f).
Mat undistorted_m;
undistortPoints(original_m, undistorted_m,
cameraMatrix, distorsion);
cout << "undistort points" << undistorted_m << endl;
// back to array
// NOTE(review): signed/unsigned comparison (int vs size_t) in the loop condition.
for(int i=0; i<original_pts.size(); ++i) {
Point2d p;
// NOTE(review): at<float>(i, 0/1) reads the x/y channels of the i-th
// 2-channel element; this relies on the Mat's contiguous layout — verify.
p.x = undistorted_m.at<float>(i, 0);
p.y = undistorted_m.at<float>(i, 1);
undistorted_points.push_back( p );
// NOTE THAT HERE THERE IS AN APPROXIMATION
// WHAT IS IT? STD::COUT? CASTING TO FLOAT?
// NOTE(review): the approximation is the float storage — undistorted_m
// holds 32-bit floats, so precision was already lost inside undistortPoints.
cout << undistorted_points[i] << endl;
}
vector< cv::Point2d > redistorted_points;
// Re-apply distortion; ideally redistorted_points == original_pts.
distortPoints(undistorted_points, redistorted_points, cameraMatrix, distorsion);
cout << redistorted_points << endl;
for(int i=0; i<original_pts.size(); ++i) {
cout << original_pts[i] << endl;
cout << redistorted_points[i] << endl;
Point2d o;
o.x = original_pts[i].x;
o.y = original_pts[i].y;
// Euclidean round-trip error for this point, in pixels.
Point2d dist = redistorted_points[i] - o;
double norm = sqrt(dist.dot(dist));
std::cout << "distance = " << norm << std::endl;
cout << endl;
}
return 0;
}
这是我的输出:
OpenCV version: 2 4
camera matrix: [1600, 0, 789;
0, 1600, 650;
0, 0, 1]
distorsion coefficent: [-0.48; 0; 0; 0; 0]
undistort points[-0.59175861, -0.22557901; -0.61276215, -0.22988389; -0.61078846, -0.24211435; -0.58972651, -0.23759322; -0.61597037, -0.23630577; -0.63910204, -0.24136727; -0.63765121, -0.25489968; -0.61291695, -0.24926868]
[-0.591759, -0.225579]
[-0.612762, -0.229884]
[-0.610788, -0.242114]
[-0.589727, -0.237593]
[-0.61597, -0.236306]
[-0.639102, -0.241367]
[-0.637651, -0.2549]
[-0.612917, -0.249269]
[24.45809095301274, 358.5558144841519; 10.15042938413364, 357.806737955385; 14.23419751024494, 342.8856229036298; 28.51642501095819, 343.610956960508; 9.353743900129871, 350.9029663678638; -4.488033489615646, 350.326357275197; -0.3050714463695385, 334.477016554487; 14.41516474594289, 334.9822130217053]
[23, 358]
[24.4581, 358.556]
distance = 1.56044
[8, 357]
[10.1504, 357.807]
distance = 2.29677
[12, 342]
[14.2342, 342.886]
distance = 2.40332
[27, 343]
[28.5164, 343.611]
distance = 1.63487
[7, 350]
[9.35374, 350.903]
distance = 2.521
[-8, 349]
[-4.48803, 350.326]
distance = 3.75408
[-4, 333]
[-0.305071, 334.477]
distance = 3.97921
[12, 334]
[14.4152, 334.982]
distance = 2.60725
最佳答案
您提到的那个问题的某个答案中链接的 initUndistortRectifyMap
确实符合您的要求。它被 Remap
用来构建完整的去畸变图像:对于目标图像(去畸变图像)中的每个位置,它给出在原始畸变图像中对应像素的位置,以便取用其颜色。所以它实际上是一个 f(undistorted) = distorted
映射。
但是,直接使用这个 map 只能查询整数坐标、且位于图像矩形内的输入位置。值得庆幸的是,文档给出了完整的公式(full equations)。
这基本上就是您已有的代码,只是您缺少了一个预备步骤。这是我的版本(虽然是 C#,但逻辑应该完全相同):
/// <summary>
/// Applies the OpenCV/Bouguet distortion model to a single point given in
/// PIXEL coordinates and returns the distorted pixel position,
/// i.e. f(undistorted) = distorted.
/// Assumes the enclosing class provides the intrinsics (fx, fy, cx, cy)
/// and the distortion coefficients (k1, k2, k3, p1, p2) as fields.
/// </summary>
public PointF Distort(PointF point)
{
// To relative coordinates <- this is the step you are missing.
// (Pixel -> normalized camera coordinates; the questioner's input was
// already normalized, which is why this step is absent in their code.)
double x = (point.X - cx) / fx;
double y = (point.Y - cy) / fy;
double r2 = x*x + y*y;
// Radial distorsion: 1 + k1*r^2 + k2*r^4 + k3*r^6.
double xDistort = x * (1 + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
double yDistort = y * (1 + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
// Tangential distorsion (Bouguet / OpenCV formulation).
xDistort = xDistort + (2 * p1 * x * y + p2 * (r2 + 2 * x * x));
yDistort = yDistort + (p1 * (r2 + 2 * y * y) + 2 * p2 * x * y);
// Back to absolute coordinates.
xDistort = xDistort * fx + cx;
yDistort = yDistort * fy + cy;
return new PointF((float)xDistort, (float)yDistort);
}
关于c++ - Opencv:向后扭曲,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/21615298/