c# - Panoramic image stitching with EmguCV

Tags: c# opencv image-processing emgucv panoramas

I am working on a panoramic image stitching project using Emgu CV (OpenCV for C#). So far I have managed to stitch some images together, but the output looks a bit odd. This is what I get:

My panorama: [image]

And this is what the Emgu CV Stitcher.Stitch method gives (stitched by the built-in stitcher): [image]
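(For reference, the comparison above was produced with Emgu CV's built-in stitcher. A minimal sketch of that call, assuming the Emgu CV 2.x Emgu.CV.Stitching API and the same usings as the code below; exact constructor and method signatures vary between Emgu CV releases, and the file paths are just the ones from this post:)

    using Emgu.CV.Stitching;

    Image<Bgr, byte>[] frames =
    {
        new Image<Bgr, byte>("D:\\Venice_panorama_part_01.jpg"),
        new Image<Bgr, byte>("D:\\Venice_panorama_part_02.jpg"),
        // ... remaining frames
    };
    using (Stitcher stitcher = new Stitcher(false))   // false: don't try the GPU (assumed 2.x constructor)
    {
        Image<Bgr, byte> pano = stitcher.Stitch(frames);
        pano.Save("D:\\builtin_result.jpg");
    }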

Clearly I am missing something. What's more, if I add more images, the output gets stretched even further, like this: [image]

I can't figure out what I'm missing. Here is my code so far:

http://pastebin.com/Ke2Zz4m9

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.UI;
using Emgu.CV.Util;
using Emgu.CV.GPU;


namespace Project
{
    public partial class Form1 : Form
    {
        public Form1()
        {
            InitializeComponent();
        }

        private void Form1_Load(object sender, EventArgs e)
        {
            Image<Bgr, float> one = new Image<Bgr, float>("D:\\Venice_panorama_part_01.jpg");
            Image<Bgr, float> two = new Image<Bgr, float>("D:\\Venice_panorama_part_02.jpg");
            Image<Bgr, float> third = new Image<Bgr, float>("D:\\Venice_panorama_part_03.jpg");
            Image<Bgr, float> fourth = new Image<Bgr, float>("D:\\Venice_panorama_part_04.jpg");
            Image<Bgr, float> fifth = new Image<Bgr, float>("D:\\Venice_panorama_part_05.jpg");
            Image<Bgr, float> sixth = new Image<Bgr, float>("D:\\Venice_panorama_part_06.jpg");
            Image<Bgr, float> seventh = new Image<Bgr, float>("D:\\Venice_panorama_part_07.jpg");
            Image<Bgr, float> eighth = new Image<Bgr, float>("D:\\Venice_panorama_part_08.jpg");

            Image<Bgr, Byte> result = FindMatch(two, third);
            result = convert(result);
            Image<Bgr, float> twoPlusThree = result.Convert<Bgr, float>();

            Image<Bgr, Byte> result2 = FindMatch(fourth, fifth);
            result2 = convert(result2);
            Image<Bgr, float> fourPlusFive = result2.Convert<Bgr, float>();

            Image<Bgr, Byte> result3 = FindMatch(sixth, seventh);
            result3 = convert(result3);
            Image<Bgr, float> sixPlusSeven = result3.Convert<Bgr, float>();

            Image<Bgr, Byte> result4 = FindMatch(one, twoPlusThree);
            result4 = convert(result4);
            Image<Bgr, float> oneTwoThree = result4.Convert<Bgr, float>();

            Image<Bgr, Byte> result5 = FindMatch(oneTwoThree, fourPlusFive);
            result5 = convert(result5);
            Image<Bgr, float> oneTwoThreeFourFive = result5.Convert<Bgr, float>();

            Image<Bgr, Byte> result6 = FindMatch(sixPlusSeven, eighth);
            result6 = convert(result6);
            Image<Bgr, float> sixSevenEighth = result6.Convert<Bgr, float>();

            Image<Bgr, Byte> result7 = FindMatch(oneTwoThreeFourFive, sixSevenEighth);
            result7 = convert(result7);

            result.Save("D:\\result1.jpg");
            result2.Save("D:\\result2.jpg");
            result3.Save("D:\\result3.jpg");
            result4.Save("D:\\result4.jpg");
            result5.Save("D:\\result5.jpg");
            result6.Save("D:\\result6.jpg");
            result7.Save("D:\\result7.jpg");
            this.Close();

        }

        public static Image<Bgr, Byte> FindMatch(Image<Bgr, float> fImage, Image<Bgr, float> lImage)
        {
            HomographyMatrix homography = null;
            SURFDetector surfCPU = new SURFDetector(500, false);


            int k = 2;
            double uniquenessThreshold = 0.8;
            Matrix<int> indices;

            Matrix<byte> mask;

            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Image<Gray, Byte> fImageG = fImage.Convert<Gray, Byte>();
            Image<Gray, Byte> lImageG = lImage.Convert<Gray, Byte>();

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(fImageG))
                //extract features from the object image
                using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float>(DistanceType.L2))
                {
                    modelKeyPoints = new VectorOfKeyPoint();
                    surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);

                    // extract features from the observed image
                    using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(lImageG))
                    using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                    using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                    using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<Byte> gpuMask = new GpuMat<byte>(gpuMatchIndices.Size.Height, 1, 1))
                    using (Stream stream = new Stream())
                    {
                        matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                        indices = new Matrix<int>(gpuMatchIndices.Size);
                        mask = new Matrix<byte>(gpuMask.Size);

                        //gpu implementation of voteForUniquess
                        using (GpuMat<float> col0 = gpuMatchDist.Col(0))
                        using (GpuMat<float> col1 = gpuMatchDist.Col(1))
                        {
                            GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                            GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                        }

                        observedKeyPoints = new VectorOfKeyPoint();
                        surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                        //wait for the stream to complete its tasks
                        //we could perform other CPU-intensive work here while waiting for the stream to complete
                        stream.WaitForCompletion();

                        gpuMask.Download(mask);
                        gpuMatchIndices.Download(indices);

                        if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                        {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }

                    }
                }
            }
            else
            {



                //extract features from the object image
                modelKeyPoints = new VectorOfKeyPoint();
                Matrix<float> modelDescriptors = surfCPU.DetectAndCompute(fImageG, null, modelKeyPoints);


                // extract features from the observed image
                observedKeyPoints = new VectorOfKeyPoint();
                Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(lImageG, null, observedKeyPoints);
                BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix<int>(observedDescriptors.Rows, k);
                using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }
            }
            Image<Bgr, Byte> mImage = fImage.Convert<Bgr, Byte>();
            Image<Bgr, Byte> oImage = lImage.Convert<Bgr, Byte>();
            Image<Bgr, Byte> result = new Image<Bgr, byte>(mImage.Width + oImage.Width, mImage.Height);

            if (homography != null)
            {  //draw a rectangle along the projected model
                Rectangle rect = fImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                homography.ProjectPoints(pts);

                // Copy the left image into the mosaic unchanged by warping it with
                // an identity homography (zero translation on the origin).
                HomographyMatrix origin = new HomographyMatrix();
                origin.SetIdentity();
                origin.Data[0, 2] = 0;
                origin.Data[1, 2] = 0;
                Image<Bgr, Byte> mosaic = new Image<Bgr, byte>(mImage.Width + oImage.Width + 2000, mImage.Height*2);

                Image<Bgr, byte> warp_image = mosaic.Clone();

                mosaic = mImage.WarpPerspective(origin, mosaic.Width, mosaic.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_DEFAULT, new Bgr(0, 0, 0));


                warp_image = oImage.WarpPerspective(homography, warp_image.Width, warp_image.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_INVERSE_MAP, new Bgr(200, 0, 0));
                Image<Gray, byte> warp_image_mask = oImage.Convert<Gray, byte>();
                warp_image_mask.SetValue(new Gray(255));
                Image<Gray, byte> warp_mosaic_mask = mosaic.Convert<Gray, byte>();
                warp_mosaic_mask.SetZero();
                warp_mosaic_mask = warp_image_mask.WarpPerspective(homography, warp_mosaic_mask.Width, warp_mosaic_mask.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_INVERSE_MAP, new Gray(0));
                warp_image.Copy(mosaic, warp_mosaic_mask);

                return mosaic;
            }
            return null;
        }

        // Crops the stitched image to the bounding rectangle of its largest
        // contour, trimming the black border introduced by the warp.
        private Image<Bgr, Byte> convert(Image<Bgr, Byte> img)
        {
            Image<Gray, byte> imgGray = img.Convert<Gray, byte>();
            Image<Gray, byte> mask = imgGray.CopyBlank();

            Contour<Point> largestContour = null;
            double largestarea = 0;

            for (var contours = imgGray.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                RETR_TYPE.CV_RETR_EXTERNAL); contours != null; contours = contours.HNext)
            {
                if (contours.Area > largestarea)
                {
                    largestarea = contours.Area;
                    largestContour = contours;
                }
            }
            CvInvoke.cvSetImageROI(img, largestContour.BoundingRectangle);
            return img;
        }
    }
}

Best Answer

There is actually nothing wrong with your code, and this image is completely correct. Note that when you stitch all the images together, you take the first (left) image as the reference plane and set it as the frontal direction; every subsequent image, which originally faces toward the right, is then projected onto that frontal plane. Imagine sitting in a room: the wall in front of you looks rectangular, while the wall to your right may look like a trapezoid. This is caused by so-called "perspective distortion" (the homography), and the wider the horizontal field of view, the more pronounced the effect. So if you intend to stitch a series of images covering a wide field of view, you would usually project onto a cylindrical or spherical surface rather than a plane. You can find this option by searching the reference manual.
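To make that suggestion concrete, the usual cylindrical forward mapping takes a plane point (x, y), measured from the image centre with focal length f in pixels, to x' = f*atan(x/f), y' = f*y/sqrt(x^2 + f^2). Below is a minimal hand-rolled sketch of such a pre-warp (this is not Emgu CV's built-in warper; CylindricalWarp, the focal-length parameter f, and the nearest-neighbour sampling are all illustrative assumptions, and it relies on the same usings as the code above), which could be applied to every frame before the existing FindMatch pipeline:

    // Minimal cylindrical pre-warp sketch (hand-rolled, not Emgu CV's built-in
    // warper). 'f' is the focal length in pixels -- an assumed tuning parameter.
    public static Image<Bgr, byte> CylindricalWarp(Image<Bgr, byte> src, double f)
    {
        Image<Bgr, byte> dst = src.CopyBlank();
        double xc = src.Width / 2.0, yc = src.Height / 2.0;

        for (int y = 0; y < dst.Height; y++)
        {
            for (int x = 0; x < dst.Width; x++)
            {
                // Inverse mapping: cylinder pixel (x, y) -> source plane pixel (xs, ys).
                double theta = (x - xc) / f;              // angle around the cylinder axis
                double xs = f * Math.Tan(theta) + xc;     // back-project onto the plane
                double ys = (y - yc) / Math.Cos(theta) + yc;

                if (xs >= 0 && xs < src.Width && ys >= 0 && ys < src.Height)
                    dst[y, x] = src[(int)ys, (int)xs];    // nearest-neighbour sample
            }
        }
        return dst;
    }

Warping all inputs with the same f before estimating the homographies keeps the outer frames from being stretched, since each frame is already unrolled onto the same cylinder; a focal length of roughly the image width is a common starting point to tune from.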

Regarding "c# - Panoramic image stitching with EmguCV", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/20181069/
