opencv - OpenCV:如何使用AffineTransformer

标签 opencv transformation matching shapes

您好,感谢您的帮助。

我想测试形状在OpenCV中用于匹配的情况,并设法完成匹配部分。
要找到旋转后的形状,我认为AffineTransformer类是正确的选择。由于我不知道匹配在内部是如何进行的,如果有人能提供一个描述该处理过程的链接,那就太好了。

正如肖申克提到的那样,我的以下代码会引发断言失败错误,因为传递给estimateTransformation函数的变量matches为空。是否有人知道如何正确地使用此函数——以及它到底做了什么?

#include <algorithm>
#include <iostream>
#include <random>
#include <string>

#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;

/// Rotate `src` by `angle` degrees around its center into `dst`,
/// enlarging the canvas so no corner of the rotated image is clipped.
/// @param src    input image (unchanged; Mat header is copied cheaply)
/// @param dst    output image, sized to the rotated bounding box
/// @param angle  rotation angle in degrees (counter-clockwise)
/// @return false if `src` is empty, true otherwise
bool rotateImage(Mat src, Mat &dst, double angle)
{
    // Guard: warpAffine asserts on an empty input.
    if (src.empty())
        return false;

    // get rotation matrix for rotating the image around its center
    cv::Point2f center(src.cols / 2.0f, src.rows / 2.0f);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);

    // bounding rectangle of the rotated image (may be larger than src)
    cv::Rect bbox = cv::RotatedRect(center, src.size(), angle).boundingRect();

    // shift the transform so the rotated image is centered in the new canvas
    rot.at<double>(0, 2) += bbox.width / 2.0 - center.x;
    rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;

    cv::warpAffine(src, dst, rot, bbox.size());
    return true;
}


/// Sample `n` points from the contours of a binary image.
/// All contour points are collected, cycled to pad up to `n` if there are
/// too few, shuffled, and the first `n` are returned.
/// @param image  binary (e.g. grayscale/thresholded) input image
/// @param n      number of points to sample (default 300)
/// @return n sampled contour points, or an empty vector if no contours exist
static vector<Point> sampleContour( const Mat& image, int n=300 )
{
    // Collect every point of every contour into one flat list.
    vector<vector<Point>> contours;
    vector<Point> all_points;
    findContours(image, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
    for (size_t i = 0; i < contours.size(); i++)
    {
        for (size_t j = 0; j < contours[i].size(); j++)
        {
            all_points.push_back(contours[i][j]);
        }
    }

    // Guard: a blank image yields no contours; the padding loop below
    // would otherwise index into an empty vector (undefined behavior).
    if (all_points.empty())
        return all_points;

    // Pad by cycling through the existing points until we have at least n.
    int dummy = 0;
    for (int add = (int)all_points.size(); add < n; add++)
    {
        all_points.push_back(all_points[dummy++]);
    }

    // shuffle — std::random_shuffle was deprecated in C++14 and removed
    // in C++17; std::shuffle with an explicit engine is the replacement.
    static std::mt19937 rng(std::random_device{}());
    std::shuffle(all_points.begin(), all_points.end(), rng);

    // Keep the first n shuffled points.
    vector<Point> sampled(all_points.begin(), all_points.begin() + n);
    return sampled;
}


/// Demo: load an image, rotate it, compare the two shapes with
/// shape-context and Hausdorff distances, then estimate the affine
/// transformation between the two sampled contours.
int main(void)
{
    Mat img1, img2;
    vector<Point> img1Points, img2Points;
    float distSC, distHD;

    // read images — imread fails silently and returns an empty Mat
    string img1Path = "testimage.jpg";
    img1 = imread(img1Path, IMREAD_GRAYSCALE);
    if (img1.empty())
    {
        cout << "Could not read " << img1Path << endl;
        return 1;
    }
    rotateImage(img1, img2, 45);
    imshow("original", img1);
    imshow("transformed", img2);
    waitKey();

    // Contours
    img1Points = sampleContour(img1);
    img2Points = sampleContour(img2);

    //Calculate Distances
    Ptr<ShapeContextDistanceExtractor> mysc = createShapeContextDistanceExtractor();
    Ptr<HausdorffDistanceExtractor> myhd = createHausdorffDistanceExtractor();

    distSC = mysc->computeDistance( img1Points, img2Points );
    distHD = myhd->computeDistance( img1Points, img2Points );

    cout << distSC << endl << distHD << endl;

    // estimateTransformation asserts when `matches` is empty: it needs an
    // explicit 1:1 index mapping between the two point sets.  Build one
    // (query index i <-> train index i) before calling it.
    vector<DMatch> matches;
    for (int i = 0; i < (int)img1Points.size(); ++i)
        matches.push_back(DMatch(i, i, 0));

    // createAffineTransformer takes a bool: false = rigid (similarity),
    // true = full affine.
    Ptr<AffineTransformer> transformerHD = createAffineTransformer(false);
    transformerHD->estimateTransformation(img1Points, img2Points, matches);
    return 0;
}

最佳答案

我在2D图像上使用了AffineTransformer类。以下是基本代码,可让您大致了解其功能。

// My OpenCv AffineTransformer demo code
// I have tested this on a 500 x 500 resolution image
#include <iostream>
#include "opencv2/opencv.hpp"
#include <vector>
using namespace cv;
using namespace std;

// Hardcoded corresponding point pairs (source -> target), tested on a
// 500 x 500 image; arrSize is the number of correspondences.
int arrSize = 10;
int sourcePx[]={154,155,159,167,182,209,238,265,295,316};
int sourcePy[]={190,222,252,285,314,338,344,340,321,290};
int tgtPx[]={120,127,137,150,188,230,258,285,305,313};
int tgtPy[]={207,245,275,305,336,345,342,332,305,274};

int main()
{
    // Prepare 'vector of points' from above hardcoded points
    int sInd=0, eInd=arrSize;
    vector<Point2f> sourceP; for(int i=sInd; i<eInd; i++) sourceP.push_back(Point2f(sourcePx[i], sourcePy[i]));
    vector<Point2f> tgtP; for(int i=sInd; i<eInd; i++) tgtP.push_back(Point2f(tgtPx[i], tgtPy[i]));

    // Create object of AffineTransformer
    bool fullAffine = true; // change its value and see difference in result
    auto aft = cv::createAffineTransformer(fullAffine);

    // Prepare vector<cv::DMatch> - this is just mapping of corresponding points indices
    std::vector<cv::DMatch> matches;
    for(int i=0; i<sourceP.size(); ++i) matches.push_back(cv::DMatch(i, i, 0));

    // Read image
    Mat srcImg = imread("image1.jpg");
    Mat tgtImg;

    // estimate points transformation
    aft->estimateTransformation(sourceP, tgtP, matches);
    // apply transformation
    aft->applyTransformation(sourceP, tgtP);
    // warp image
    aft->warpImage(srcImg, tgtImg);
    // show generated output
    imshow("warped output", tgtImg);
    waitKey(0);

    return 0;
}

关于opencv - OpenCV:如何使用AffineTransformer,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/44434137/

相关文章:

c - 如何将元素 (cv::Point) 添加到共享数组中 - CUDA

java - 将我的代码(mac)中的绝对路径更改为我的电脑的相对路径

algorithm - 查找对应于总和列表的一组对

java - 匹配器和模式 Android

image - 为 4 channel 图像中的对象添加填充

c++ - all() matlab 函数 opencv

c++ - 从 haskell 调用 C opencv 函数

three.js - 如何将新的相机旋转与之前的变换组合起来?

computer-vision - 计算单应性给定两个相机之间的旋转和平移

用 R 数据框中的匹配 ID 替换单元格