c++ - 将两个图像拼接在一起的问题

标签 c++ image opencv

我正在使用 C++ 在 Opencv 中编程,但在重叠点扭曲两张图像时遇到了一些困难。我使用的是标准类型方法:检测关键点、提取描述符、匹配描述符、查找单应性、使用单应性将图像 2 映射到图像 1 的引用中,然后将两个图像拼接在一起。

代码如下,最终拼接结果图见 http://madda99.imgur.com/all/ 。非常感谢任何关于如何对齐两张图像的建议/帮助。

#include <iostream>
#include <stdio.h>      /* printf */
#include <time.h>
#include <Windows.h>

#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
//#include <cv.h>

using namespace cv;
using namespace std;


/** @function main */
int main( int argc, char** argv )
{
// if( argc != 3 )
 //{ readInImages(); return -1; }



    Mat image1 = imread(argc == 2 ? argv[1] : "sat1.png", 1);
      if (image1.empty())
      {
        cout << "Cannot open image!" << endl;
        return -1;
      }
      imshow("image", image1);
      waitKey(0);
     // return 0;

      Mat image2 = imread(argc == 2 ? argv[2] : "sat2.png", 1);
           if (image2.empty())
         {
         cout << "Cannot open image!" << endl;
         return -1;
         }
         imshow("image", image2);
         waitKey(0);



Mat img_gray1 = image1.clone();
Mat img_gray2 = image2.clone();

//Mat gray_image2;

cvtColor(image1, img_gray1, CV_RGB2GRAY);
cvtColor(image2, img_gray2, CV_RGB2GRAY);

imshow("image",img_gray1);
waitKey(0);
imshow("image",img_gray2);
waitKey(0);


if( !img_gray1.data || !img_gray2.data )
 { std::cout<< " --(!) Error reading images " << std::endl; return -1; }



//-- Step 1: Detect the keypoints using SURF Detector
 int minHessian = 2000;

SurfFeatureDetector detector( minHessian );

std::vector< KeyPoint > keypoints1, keypoints2;
vector<int> values;

detector.detect( img_gray1, keypoints1 );
detector.detect( img_gray2, keypoints2 );



//-- Step 2: Calculate descriptors (feature vectors)

SurfDescriptorExtractor extractor;

Mat descriptors_keypoints1, descriptors_keypoints2;

extractor.compute( img_gray1, keypoints1, descriptors_keypoints1 );
extractor.compute( img_gray2, keypoints2, descriptors_keypoints2 );


//-- Step 3: Matching descriptor vectors using FLANN matcher
if ( descriptors_keypoints1.empty() ) {
    cout << "Empty!!!!" << endl;}

//cvError(0,"MatchFinder","1st descriptor empty",__FILE__,__LINE__);

    //cvError(0,"MatchFinder","1st descriptor empty",__FILE__,__LINE__);
if ( descriptors_keypoints2.empty() ) {
    cout << "Empty!!!!" << endl;
}
  // cvError(0,"MatchFinder","2nd descriptor empty",__FILE__,__LINE__);

FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_keypoints1, descriptors_keypoints2, matches);


float dif = difftime (end,start);
printf ("Elapsed time is %f seconds.", dif );



//-- Draw Matches
Mat target;
   drawMatches(image1,keypoints1,image2,keypoints2,matches,target);
   imshow("Matches", target);
   waitKey(0);


   double max_dist = 0; double min_dist = 100;

 //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_keypoints1.rows; i++ )
    { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
    }

   printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

   //-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;

   for( int i = 0; i < descriptors_keypoints1.rows; i++ )
    { if( matches[i].distance <= 3*min_dist )
    { good_matches.push_back( matches[i]); }
    }
    std::vector< Point2f > obj;
    std::vector< Point2f > scene;

   cout << descriptors_keypoints1<< endl << " "  << descriptors_keypoints1 << endl << endl;

    cout.setf( std::ios::fixed, std::ios::floatfield );
    cout.precision(1);
    cout << descriptors_keypoints1 << endl;

   for( int i = 0; i < good_matches.size(); i++ )
    {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
    }


   cout << obj.size() << endl;
   cout << scene.size() << endl;


   // Find the Homography Matrix
    //Mat H = findHomography(scene,obj, CV_RANSAC);
   cv:: Mat H = cv::findHomography(scene,obj, CV_RANSAC);
   // Use the Homography Matrix to warp the images
    cv::Mat result;


        Mat warpImage2;
        warpPerspective(image2, warpImage2, H, Size(image2.cols, image2.rows), INTER_CUBIC);



    cv::Mat result1;
     warpPerspective(image2,result1,H,cv::Size(image1.cols+image2.cols,image1.rows));
     cv::Mat half1(result1,cv::Rect(0,0,image2.cols,image2.rows));
     image2.copyTo(half1);
     imshow( "Two image Mosaic", result1 );


    waitKey(0);
    return 0;
    }

最佳答案

关于我的评论,这里有一段使用 OpenCV 拼接类的代码片段:

#include <iostream>
#include <vector>

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/stitcher.hpp"

int main( int argc, char** argv )
{
    std::vector<cv::Mat> images;
    cv::Mat stitchedImage;
    cv::Stitcher stitcher = Stitcher::createDefault(true);

    //... Get your images (image1, image2) using command line parameters        


    //Stitch all images together, additionally you can add status/error handling
    images.push_back(image1);
    images.push_back(image2);
    stitcher.stitch(images, stitchedImage);

    cv::imshow("Stitched images", stitchedImage);
    cv::waitKey(0);
}

如您所见,这是非常高级的编程,几乎涵盖了上述所有步骤。

关于c++ - 将两个图像拼接在一起的问题,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/25440894/

相关文章:

c++ - Prewitt 滤波器、边缘检测

c++ - 在 Xcode 中使用 boost 库编译项目时出错

c# - 动态显示数百张图片

html - 将鼠标悬停在 anchor 标记内的图像上并且 anchor 标记位于 div 内时,如何显示文本?

c++ - 有没有办法使用opencv检测图像中的相邻圆

c++ - 在 SVM opencv c++ 中标记数据

java - 如何从 C++ 代码 (android) 调用非静态 Java 方法?

c++ - 我可以让 clang 生成函数指针的绝对地址吗?

c++ - 为什么 winmain 不设置错误级别?

python - cv2.imshow() 在自定义函数中不起作用