c# - Emgu CV SURF get matched points coordinates

Tags: c# image opencv image-processing emgucv

I am using the SURF feature of Emgu CV to recognize similar objects in images.

The result image is drawn correctly: it shows all the keypoints found in both images, the similar points (which is what I want), and a rectangle (usually a rectangle, sometimes just a line) covering the similar points.

The problem is that the similar points are visible in the image, but they are not saved in the format I want. In fact, they are stored in a VectorOfKeyPoint object, which just holds a pointer and other memory data, with the points themselves stored in memory (that is what I think). In other words, I can't get the similar points in pairs like this:

((img1X, img1Y), (img2X, img2Y))

That is what I'm looking for, so that I can use the points later. Right now I can only see the points in the resulting image, but I can't get them as pairs.
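In C# terms, what I am after is basically a list of point pairs, something like this (just a sketch of the shape of the result I want, not code from the sample; matchedPairs is a name I made up):

// One entry per matched point, pairing the coordinate in image 1
// with the coordinate in image 2 (illustrative only).
List<Tuple<PointF, PointF>> matchedPairs = new List<Tuple<PointF, PointF>>();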

The code I am using is the example from Emgu CV.

//----------------------------------------------------------------------------
//  Copyright (C) 2004-2016 by EMGU Corporation. All rights reserved.       
//----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Runtime.InteropServices;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.Util;
#if !__IOS__
using Emgu.CV.Cuda;
#endif
using Emgu.CV.XFeatures2D;

namespace FirstEmgu
{

    public static class DrawMatches
    {
        // --------------------------------
        // ORIGINAL FUNCTION FROM EXAMPLE
        // --------------------------------
        private static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh = 300;

            Stopwatch watch;
            homography = null;

            modelKeyPoints = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

#if !__IOS__
            if (CudaInvoke.HasCuda)
            {
                CudaSURF surfCuda = new CudaSURF((float)hessianThresh);
                using (GpuMat gpuModelImage = new GpuMat(modelImage))
                //extract features from the object image
                using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
                using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
                {
                    surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    using (GpuMat gpuObservedImage = new GpuMat(observedImage))
                    using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
                    using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                    //using (GpuMat tmp = new GpuMat())
                    //using (Stream stream = new Stream())
                    {
                        matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                        surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                               matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                   observedKeyPoints, matches, mask, 2);
                        }
                    }
                    watch.Stop();
                }
            }
            else
#endif
            {
                using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
                {
                    SURF surfCPU = new SURF(hessianThresh);
                    //extract features from the object image
                    UMat modelDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    UMat observedDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    matcher.Add(modelDescriptors);

                    matcher.KnnMatch(observedDescriptors, matches, k, null);
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                           matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                               observedKeyPoints, matches, mask, 2);
                    }

                    watch.Stop();
                }
            }
            matchTime = watch.ElapsedMilliseconds;
        }
        // --------------------------------
        // ORIGINAL FUNCTION FROM EXAMPLE
        // --------------------------------
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Mat Draw(Mat modelImage, Mat observedImage, out long matchTime)
        {
            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                Mat mask;
                FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
                   out mask, out homography);

                //Draw the matched keypoints
                Mat result = new Mat();
                Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                   matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

                #region draw the projected region on the image

                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[] pts = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

                    Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
                    using (VectorOfPoint vp = new VectorOfPoint(points))
                    {
                        CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                    }

                }

                #endregion

                return result;

            }
        }

        // ----------------------------------
        // WRITTEN BY MYSELF
        // ----------------------------------
        // Returns 4 points (usually rectangle) of similar points
        // but can't be used, since sometimes this is a line (negative 
        // points)
        public static Point[] FindPoints(Mat modelImage, Mat observedImage, out long matchTime)
        {
            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                Mat mask;
                FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
                   out mask, out homography);

                //Draw the matched keypoints
                Mat result = new Mat();
                Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                   matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

                Point[] points = null;
                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[] pts = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

                    points = Array.ConvertAll<PointF, Point>(pts, Point.Round);

                }

                return points;
            }
        }
    }
}

EDIT

I have managed to get some points out of the match object like this:

Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                   matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

                for (int i = 0; i < matches.Size; i++)
                {
                    var a = matches[i].ToArray();
                    foreach (var e in a)
                    {
                        Point p = new Point(e.TrainIdx, e.QueryIdx);
                        Console.WriteLine(string.Format("Point: {0}", p));
                    }
                    Console.WriteLine("-----------------------");
                }

I thought this should get me the points. I managed to get it working in Python, and the code is not that different. The problem is that too many points are returned. In fact, this returns all the points on Y.

Example

(45, 1), (67, 1)

(656, 2), (77, 2)

...

It doesn't give me the points I want, even though I might be close. Any suggestions are appreciated.

EDIT 2: This question: Find interest point in surf Detector Algorithm is very similar to what I need. There is only one answer, but it doesn't say how to get the coordinates of the matched points. That is what I need: if an object is present in both images, get the coordinates of the object's points from both images.

Best Answer

The coordinates are not made up of TrainIdx and QueryIdx; those are indices into the KeyPoints. The following gives the pixel coordinates of the matches between the model image and the observed image.

for (int i = 0; i < matches.Size; i++)
{
    var arrayOfMatches = matches[i].ToArray();
    if (mask.GetData(i)[0] == 0) continue;
    foreach (var match in arrayOfMatches)
    {
        var matchingModelKeyPoint = modelKeyPoints[match.TrainIdx];
        var matchingObservedKeyPoint = observedKeyPoints[match.QueryIdx];
        Console.WriteLine("Model coordinate '" + matchingModelKeyPoint.Point + "' matches observed coordinate '" + matchingObservedKeyPoint.Point + "'.");
    }
}

The number of items in arrayOfMatches equals the value of K. My understanding is that the match with the lowest distance is the best one.
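If you only want a single coordinate pair per query point, a sketch along these lines should work (untested; it builds on the loop above, needs System.Linq, and bestPairs / bestMatch are just illustrative names):

var bestPairs = new List<Tuple<PointF, PointF>>();   // (model point, observed point)
for (int i = 0; i < matches.Size; i++)
{
    var arrayOfMatches = matches[i].ToArray();
    if (arrayOfMatches.Length == 0) continue;
    if (mask.GetData(i)[0] == 0) continue;            // skip matches rejected by the mask

    // K matches are returned per query point; the one with the smallest distance is the best
    var bestMatch = arrayOfMatches.OrderBy(m => m.Distance).First();

    PointF modelPoint = modelKeyPoints[bestMatch.TrainIdx].Point;
    PointF observedPoint = observedKeyPoints[bestMatch.QueryIdx].Point;
    bestPairs.Add(Tuple.Create(modelPoint, observedPoint));
}

Because the mask is checked, only matches that survived the uniqueness and homography voting in FindMatch end up in the list.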

Regarding "c# - Emgu CV SURF get matched points coordinates", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/36269038/
