c++ - How to detect multiple objects with OpenCV in C++?

Tags: c++ opencv k-means opencv3.0 mean-shift

I got the idea from the answer here, which is a Python implementation, but I need C++; that answer works very well. My plan is: run detectAndCompute to get the keypoints, split them into clusters with kmeans, run matcher->knnMatch for each cluster against the object descriptors, and then do the rest like the usual single-object detection pipeline. The main question is: how do I supply the descriptors of each cluster to the matcher->knnMatch call? I thought about setting the descriptor values that correspond to the other keypoints to 0 (making them useless). Is that right? (A sketch of an alternative approach follows the questions below.) I ran into a few problems in my attempt:

  1. How can I estimate the number of clusters for kmeans?
  2. Why can a Mat array for the clusters be created like this: Mat descriptors_scene_clusters[3] = { Mat(descriptors_scene.rows, descriptors_scene.cols, CV_8U, Scalar(0)) };?

Any help would be greatly appreciated!
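A minimal sketch (my own idea, not from the linked answer) of an alternative to zeroing descriptors: gather the descriptor rows of each cluster into a separate Mat and keep a table that maps each gathered row back to its original index in keypoints_scene, so the trainIdx values returned by knnMatch can be resolved. The names cluster_descriptors and cluster_to_scene_idx are hypothetical; labels, estimate_cluster_count and descriptors_scene refer to the variables in the code below.

// Sketch: one descriptor Mat per k-means cluster, built by copying rows.
// cluster_to_scene_idx[c][r] is the original keypoint index of row r in cluster c.
std::vector<cv::Mat> cluster_descriptors(estimate_cluster_count);
std::vector<std::vector<int>> cluster_to_scene_idx(estimate_cluster_count);

for (int i = 0; i < labels.rows; i++) {
    int c = labels.at<int>(i);                                    // cluster of keypoint i
    cluster_descriptors[c].push_back(descriptors_scene.row(i));   // copy its descriptor row
    cluster_to_scene_idx[c].push_back(i);                         // remember the original index
}

// After matcher->knnMatch(descriptors_object, cluster_descriptors[c], matches, 2),
// the scene keypoint of a match m is keypoints_scene[cluster_to_scene_idx[c][m.trainIdx]].
// Clusters with fewer than 2 rows should be skipped when asking for 2 neighbours.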


[output image]

#include <stdio.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace cv;
using namespace cv::xfeatures2d;

#define MIN_MATCH_COUNT 10

int main()
{
    Mat img_object = imread("./2.PNG", IMREAD_GRAYSCALE);
    Mat img_scene = imread("./1.PNG", IMREAD_GRAYSCALE);

    Ptr<ORB> detector = ORB::create();
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    Mat descriptors_object, descriptors_scene;
    detector->detectAndCompute(img_object, cv::Mat(), keypoints_object, descriptors_object);
    detector->detectAndCompute(img_scene, cv::Mat(), keypoints_scene, descriptors_scene);


    std::cout << descriptors_scene.row(0) << "\n";
    std::cout << descriptors_scene.cols << "\n";


    std::vector<Point2f> keypoints_scene_points_;
    for (int i=0; i<keypoints_scene.size(); i++) {
        keypoints_scene_points_.push_back(keypoints_scene[i].pt);
    }
    Mat keypoints_scene_points(keypoints_scene_points_);

    Mat labels;
    int estimate_cluster_count = 3; // estimated ??????????
    kmeans(keypoints_scene_points, estimate_cluster_count, labels, TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0), 3, KMEANS_RANDOM_CENTERS);

    std::cout << "==================================111111\n";

    Mat descriptors_scene_clusters[3] = { Mat(descriptors_scene.rows, descriptors_scene.cols, CV_8U, Scalar(0)) };

    std::cout << "==================================111111------\n";

    for (int i=0; i<labels.rows; i++) {
        int clusterIndex = labels.at<int>(i);
        Point2f pt = keypoints_scene_points.at<Point2f>(i);
        descriptors_scene_clusters[clusterIndex].at<uchar>(pt) = descriptors_scene.at<uchar>(pt);  // ?????? error
    }

    std::cout << descriptors_scene_clusters[0] << "\n";
    std::cout << "==================================22222222\n";
    // return 0;

    Mat img_matches = img_scene;
    std::vector<DMatch> all_good_matches;
    for (int i=0; i<estimate_cluster_count; i++) {
        std::cout << "==================================33333\n";

        Ptr<flann::IndexParams> indexParams = makePtr<flann::KDTreeIndexParams>(5);
        Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>(50);
        Ptr<FlannBasedMatcher> matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
        // BFMatcher matcher;
        std::vector<std::vector<DMatch>> matches;

        std::cout << "==================================444444\n";
        matcher->knnMatch(descriptors_object, descriptors_scene_clusters[i], matches, 2);
        std::cout << "==================================555555\n";
        std::vector<DMatch> good_matches;

        for (auto &match : matches) {
            if (match[0].distance < 0.7 * match[1].distance) {
                good_matches.push_back(match[0]);
            }
        }

        all_good_matches.insert(all_good_matches.end(), good_matches.begin(), good_matches.end());

        std::cout << "==================================66666\n";

        if (good_matches.size() > MIN_MATCH_COUNT) {

            //-- Localize the object
            std::vector<Point2f> obj;
            std::vector<Point2f> scene;

            for (auto &match : good_matches) {
                //-- Get the keypoints from the good matches
                obj.push_back(keypoints_object[match.queryIdx].pt);
                scene.push_back(keypoints_scene[match.trainIdx].pt);
            }

            Mat H = findHomography(obj, scene, RANSAC);

            //-- Get the corners from the image_1 ( the object to be "detected" )
            std::vector<Point2f> obj_corners(4);
            obj_corners[0] = cvPoint(0, 0);
            obj_corners[1] = cvPoint(img_object.cols, 0);
            obj_corners[2] = cvPoint(img_object.cols, img_object.rows);
            obj_corners[3] = cvPoint(0, img_object.rows);
            std::vector<Point2f> scene_corners(4);

            perspectiveTransform(obj_corners, scene_corners, H);

            //-- Draw lines between the corners (the mapped object in the scene - image_2 )
            line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0),
                 scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
            line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0),
                 scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
            line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0),
                 scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
            line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0),
                 scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);

            print(scene_corners);
        }
    }

    drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
                    all_good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                    std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);


    //-- Show detected matches
    imshow("Good Matches & Object detection", img_matches);

    waitKey(0);
    return 0;
}

Best Answer

You can compute the clusters directly with a simple clusterer, and use the result to determine the number of clusters and/or to initialize the cluster centers for kmeans. Below is a possible implementation of an agglomerative clusterer that merges points which are closer together than a specified distance; see the dist parameter in the constructor. In your case, dist could be the maximum distance between keypoints within the small image.

Header file:

#include <map>
#include <vector>
#include <opencv2/core.hpp>

class PointsClusterer {
public:
    PointsClusterer(int dist, std::vector<cv::Point2f> points);
    bool cluster();
    std::map<int, std::vector<cv::Point2f>> getClusters();

private:
    // Centroid distance between the i-th and j-th cluster (by position in m_Clusters).
    double distance(int i, int j);
    // Merge the j-th cluster into the i-th cluster and erase the j-th one.
    void merge(int i, int j);

private:
    std::vector<cv::Point2f> m_Points;            // all input points
    std::map<int, std::vector<int>> m_Clusters;   // cluster id -> indices into m_Points
    std::map<int, cv::Point2f> m_Sums;            // cluster id -> sum of its points
    int m_dist = 0;                               // merge threshold
};

Cpp file:

#include "PointsClusterer.h" // the header shown above (file name assumed)
#include <iterator>          // std::next

PointsClusterer::PointsClusterer(int dist, std::vector<cv::Point2f> points) :
    m_Points(points), m_dist(dist)
{}

bool PointsClusterer::cluster()
{
    // Initialization: every point starts as its own cluster.
    for (int i = 0; i < (int)m_Points.size(); ++i)
    {
        m_Clusters[i] = std::vector<int>(1, i);
        m_Sums[i] = m_Points[i];
    }

    // Merge clusters until no two centroids are closer than m_dist.
    bool still_merge = true;
    while (still_merge)
    {
        still_merge = false;
        bool break_i = false;

        for (int i = 0; i < (int)m_Clusters.size() && !break_i; ++i)
            for (int j = i + 1; j < (int)m_Clusters.size(); ++j)
            {
                if (distance(i, j) < m_dist)
                {
                    merge(i, j);
                    break_i = true;
                    still_merge = true;
                    break;
                }
            }
    }
    // final conversion to std::map <int, std::vector <cv::Point2f>> is missing
    return true;
}

void PointsClusterer::merge(int i, int j)
{
    // std::map iterators are not random access, so advance them explicitly.
    auto iti = std::next(m_Clusters.begin(), i);
    auto itj = std::next(m_Clusters.begin(), j);

    for (auto val : itj->second)
    {
        iti->second.push_back(val);
        m_Sums[iti->first] += m_Points[val];
    }
    m_Clusters.erase(itj);
}

double PointsClusterer::distance(int i, int j)
{
    auto iti = std::next(m_Clusters.begin(), i);
    auto itj = std::next(m_Clusters.begin(), j);
    // Compare cluster centroids (sum of points divided by point count).
    cv::Point2f vali = m_Sums[iti->first] / (float)iti->second.size();
    cv::Point2f valj = m_Sums[itj->first] / (float)itj->second.size();
    return cv::norm(vali - valj);
}

The cluster method is implemented in an oversimplified way so that how it works is obvious at a glance. Its performance could be improved, but I believe that is beyond the scope of your question.
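A rough usage sketch (my addition, not part of the answer above) of how the clusterer could feed the question's kmeans call, assuming getClusters() is completed as the comment in cluster() indicates, and assuming the object width is a reasonable merge radius:

// Hypothetical usage; dist and the completed getClusters() are assumptions.
int dist = img_object.cols;                                 // assumed merge radius
PointsClusterer clusterer(dist, keypoints_scene_points_);
clusterer.cluster();
std::map<int, std::vector<cv::Point2f>> clusters = clusterer.getClusters();

// Use the number of clusters found here as the k for the kmeans call in the question,
// or work on each clusters[c] directly instead of running kmeans at all.
int estimate_cluster_count = (int)clusters.size();
cv::Mat labels;
cv::kmeans(keypoints_scene_points, estimate_cluster_count, labels,
           cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
           3, cv::KMEANS_RANDOM_CENTERS);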

Regarding "c++ - How to detect multiple objects with OpenCV in C++?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/52425355/
