c++ - OpenCV C++: create a reusable set of keypoints and descriptors for stitching multiple images

Tags: c++ opencv descriptor image-stitching keypoint

I have written a program that stitches multiple images together, and I am now trying to make it more efficient. Depending on the size of the stitched image, it eventually becomes very large and contains too many keypoints, and the machine runs out of allocatable memory. To compensate, my goal is to store all of the keypoints and descriptors as they are found, so that I never have to detect them again in the master stitched image, only in the new image being stitched on. I had this process working in Python, but have not had the same luck in C++. To do this I need to run perspectiveTransform() on the keypoints, which means converting them from vector<KeyPoint> to vector<Point2f> and back to vector<KeyPoint>. I have been able to do this and can confirm that it works (code follows below). I am not sure whether the same process needs to be applied to the descriptors (at the moment I do apply it, but it is either wrong or has no effect).
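
For reference, here is a minimal, self-contained sketch of that round trip; the helper name warpKeypoints and the homography H are illustrative placeholders, not identifiers from the program below:

#include <opencv2/core.hpp>
#include <vector>

// Warp a set of keypoint locations by a 3x3 homography H and rebuild a
// vector<KeyPoint>; the original size/angle/response values are not preserved.
std::vector<cv::KeyPoint> warpKeypoints(const std::vector<cv::KeyPoint>& kps, const cv::Mat& H){
    std::vector<cv::Point2f> pts;
    cv::KeyPoint::convert(kps, pts);         // KeyPoint -> Point2f
    cv::perspectiveTransform(pts, pts, H);   // apply the homography to the coordinates
    std::vector<cv::KeyPoint> warped;
    warped.reserve(pts.size());
    for (size_t i = 0; i < pts.size(); i++){
        warped.push_back(cv::KeyPoint(pts[i], 1.f));   // Point2f -> KeyPoint, size set to 1
    }
    return warped;
}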

The problem: when I run it, the keypoints and descriptors do not seem to work, and an error that I created is thrown: "Not enough matches found", even though I know that at least the keypoints are making it into the function.

Here is the code for the keypoint and descriptor transforms. It first computes the warpPerspective to apply to the first image, since the homography on its own will only warp the second image. The rest of the code handles the keypoints and descriptors.

tuple<Mat, vector<KeyPoint>, Mat>  stitchMatches(Mat image1,Mat image2, Mat homography, vector<KeyPoint> kp1, vector<KeyPoint> kp2 , Mat desc1, Mat desc2){
    Mat result, destination, descriptors_updated;
    vector<Point2f> fourPoint;
    vector<KeyPoint> keypoints_updated;

    //-Get the four corners of the first image (master)
    fourPoint.push_back(Point2f (0,0));
    fourPoint.push_back(Point2f (image1.size().width,0));
    fourPoint.push_back(Point2f (0, image1.size().height));
    fourPoint.push_back(Point2f (image1.size().width, image1.size().height));
    //perspectiveTransform(Mat(fourPoint), destination, homography);


    //- Get points used to determine Htr
    double min_x, min_y, tam_x, tam_y;
    float min_x1, min_x2, min_y1, min_y2, max_x1, max_x2, max_y1, max_y2;
    min_x1 = min(fourPoint.at(0).x, fourPoint.at(1).x);
    min_x2 = min(fourPoint.at(2).x, fourPoint.at(3).x);
    min_y1 = min(fourPoint.at(0).y, fourPoint.at(1).y);
    min_y2 = min(fourPoint.at(2).y, fourPoint.at(3).y);
    max_x1 = max(fourPoint.at(0).x, fourPoint.at(1).x);
    max_x2 = max(fourPoint.at(2).x, fourPoint.at(3).x);
    max_y1 = max(fourPoint.at(0).y, fourPoint.at(1).y);
    max_y2 = max(fourPoint.at(2).y, fourPoint.at(3).y);
    min_x = min(min_x1, min_x2);
    min_y = min(min_y1, min_y2);
    tam_x = max(max_x1, max_x2);
    tam_y = max(max_y1, max_y2);

    //- Htr used to map image one to the result, in line with the already warped image
    Mat Htr = Mat::eye(3,3,CV_64F);
    if (min_x < 0){
        tam_x = image2.size().width - min_x;
        Htr.at<double>(0,2)= -min_x;
    }
    if (min_y < 0){
        tam_y = image2.size().height - min_y;
        Htr.at<double>(1,2)= -min_y;
    }

    result = Mat(Size(tam_x*2,tam_y*2), CV_8UC3,cv::Scalar(0,0,0));
    warpPerspective(image2, result, Htr, result.size(), INTER_LINEAR, BORDER_TRANSPARENT,   0);
    warpPerspective(image1, result, (Htr*homography), result.size(), INTER_LINEAR, BORDER_TRANSPARENT,0);



    //-- Variables to hold the keypoints at the respective stages
    vector<Point2f> kp1Local,kp2Local;
    vector<KeyPoint> kp1updated, kp2updated;


    //Localize the keypoints to allow for perspective change
    KeyPoint::convert(kp1, kp1Local);
    KeyPoint::convert(kp2, kp2Local);

    //perform perspective transform on the keypoints of type vector<Point2f>
    perspectiveTransform(kp1Local, kp1Local, (Htr));
    perspectiveTransform(kp2Local, kp2Local, (Htr*homography));


    //convert keypoints back to type vector<KeyPoint>
    for( size_t i = 0; i < kp1Local.size(); i++ ) {
        kp1updated.push_back(KeyPoint(kp1Local[i], 1.f));
    }
    for( size_t i = 0; i < kp2Local.size(); i++ ) {
        kp2updated.push_back(KeyPoint(kp2Local[i], 1.f));
    }

    //Add to the master list of keypoints to be passed along during the next iteration
    keypoints_updated.reserve(kp1updated.size() + kp2updated.size());
    keypoints_updated.insert(keypoints_updated.end(),kp1updated.begin(),kp1updated.end());
    keypoints_updated.insert(keypoints_updated.end(),kp2updated.begin(),kp2updated.end());

    //WarpPerspective of descriptors to match that of the images and corresponding keypoints
    Mat desc1New, desc2New;
    warpPerspective(desc2, desc2New, Htr, result.size(), INTER_LINEAR, BORDER_TRANSPARENT,   0);
    warpPerspective(desc1, desc1New, (Htr*homography), result.size(), INTER_LINEAR, BORDER_TRANSPARENT,0);

    //create a new Mat including the descriptors from desc1 and desc2
    descriptors_updated.push_back(desc1New);
    descriptors_updated.push_back(desc2New);


    //------------TEST TO see if keypoints have moved

    Mat img_keypoints;
    drawKeypoints( result, keypoints_updated, img_keypoints, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

    imshow("Keypoints 1", img_keypoints );
    waitKey();
    destroyAllWindows();



    return {result, keypoints_updated, descriptors_updated};
}
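
A note on the descriptor handling above: descriptors are looked up by keypoint index, not by pixel position, so a common alternative (an assumption on my part, not something confirmed in the original post) is to skip the warpPerspective calls on desc1/desc2 entirely and simply stack the two descriptor matrices row-wise, in the same order the keypoints were appended:

// Sketch (assumption): leave the descriptors untouched and concatenate them row-wise,
// so that row i of descriptors_combined still belongs to keypoints_updated[i].
// This presumes both images were described with the same extractor.
Mat descriptors_combined;
vconcat(desc1, desc2, descriptors_combined);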

The following code is my master stitching program, which performs the actual stitching.

tuple<Mat,vector<KeyPoint>,Mat> stitch(Mat img1,Mat img2 ,vector<KeyPoint> keypoints, Mat descriptors, String featureDetection,String featureExtractor,String keypointsMatcher,String showMatches){

    Mat desc, desc1, desc2, homography, result, croppedResult,descriptors_updated;
    std::vector<KeyPoint> keypoints_updated, kp1, kp2;
    std::vector<DMatch> matches;
    //-Base Case[2]
    if (keypoints.empty()){

        //-Detect Keypoints and their descriptors
        tie(kp1,desc1) = KeyPointDescriptor(img1, featureDetection,featureExtractor);
        tie(kp2,desc2) = KeyPointDescriptor(img2, featureDetection,featureExtractor);

        //Find matches and calculate the homography based on keypoints and descriptors
        std::tie(matches,homography) = matchFeatures(kp1,  desc1,kp2, desc2, keypointsMatcher);
        //draw matches if requested
        if(showMatches == "true"){
            drawMatchedImages( img1, kp1, img2, kp2, matches);
        }
        //stitch the images and update the keypoint and descriptors
        std::tie(result,keypoints_updated,descriptors_updated) = stitchMatches(img1, img2, homography,kp1,kp2,desc1,desc2);
        //crop the result using the created cropping function
        croppedResult = crop(result);
        return {croppedResult,keypoints_updated,descriptors_updated};

    }

    //base case[3:n]
    else{

        //Get keypoints and descriptors of new image and add to respective lists
        tie(kp2,desc2) = KeyPointDescriptor(img2, featureDetection,featureExtractor);

        //find matches and determine homography
        std::tie(matches,homography) = matchFeatures(keypoints_updated,descriptors_updated,kp2,desc2, keypointsMatcher);
        //draw matches if requested
        if(showMatches == "true")
            drawMatchedImages( img1, keypoints, img2, kp2, matches);

        //stitch the images and update the keypoint and descriptors
        tie(result,keypoints_updated,descriptors_updated) = stitchMatches(img1, img2, homography,keypoints,kp2,descriptors,desc2);
        //crop the result using the created cropping function
        croppedResult = crop(result);
        return {croppedResult,keypoints_updated,descriptors_updated};
        }
}
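
For context, a hedged sketch of a driver loop that chains these calls, carrying the returned keypoints and descriptors into the next iteration; the detector/extractor/matcher option strings are placeholders, since the post does not show which values KeyPointDescriptor and matchFeatures accept:

// Hypothetical driver: stitch already-loaded images in order, reusing the
// keypoints and descriptors returned by each call on the next one.
Mat stitchAll(const vector<Mat>& images){
    Mat panorama = images.at(0);
    vector<KeyPoint> carriedKeypoints;   // empty -> the first call takes the base case
    Mat carriedDescriptors;
    for (size_t i = 1; i < images.size(); i++){
        tie(panorama, carriedKeypoints, carriedDescriptors) =
            stitch(panorama, images[i], carriedKeypoints, carriedDescriptors,
                   "ORB", "ORB", "BF", "false");   // placeholder option strings
    }
    return panorama;
}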

And finally, here is an image of the keypoints that are being transformed onto the stitched image. Any help is greatly appreciated!

(Image: keypoints drawn over the stitched result)

Best Answer

After combing through it, I happened to find that I had used the wrong variable at one point! :)

(In the code as posted, the most likely culprit is the else branch of stitch(), which passes the still-empty locals keypoints_updated and descriptors_updated to matchFeatures() instead of the keypoints and descriptors parameters.)
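
If that reading is correct, the fix is along these lines (a guess based on the posted code, not a confirmed diff):

// In the else branch of stitch(): match against the keypoints/descriptors that were
// passed in, rather than the still-empty locals declared at the top of the function.
std::tie(matches, homography) = matchFeatures(keypoints, descriptors, kp2, desc2, keypointsMatcher);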

Original question on Stack Overflow: https://stackoverflow.com/questions/44936653/
