c# - Aligning a depth image with an RGB image

Tags: c#, kinect

For quite a while now I have been trying to align the depth stream with the RGB stream coming from the Kinect. I have read several articles about it, but I must be missing something essential, because I can't get it to work...

Here is an image of what I have managed to produce, with a few easy-to-spot misalignments circled: Kinect Depth misalignment

I have tried to boil it down to as little code as possible, but it is still a fair chunk, so please bear with me. The snippet below is the skeleton of the handler the Kinect SDK calls every time a depth frame and an RGB frame are ready.

As you can see, I have been trying

ColorImagePoint colorpoint = _Sensor.CoordinateMapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, depthpoint, ColorImageFormat.RgbResolution640x480Fps30);

I would rather use CoordinateMapper.MapDepthFrameToColorFrame (since that should handle the whole mapping in one call), but I can't get it to work.. though I'm probably not using it correctly..
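For reference, this is roughly how I understand the frame-level call is supposed to be used, based on the SDK 1.6 signature. It is only a sketch, not code I have working; depthFrame and _Sensor are the same objects used in the handler further down, and both streams are assumed to run at 640x480:

// Sketch: map the whole depth frame to color space in one call.
DepthImagePixel[] depthPixels = new DepthImagePixel[depthFrame.PixelDataLength];
depthFrame.CopyDepthImagePixelDataTo(depthPixels);

ColorImagePoint[] colorPoints = new ColorImagePoint[depthPixels.Length];
_Sensor.CoordinateMapper.MapDepthFrameToColorFrame(
    DepthImageFormat.Resolution640x480Fps30,
    depthPixels,
    ColorImageFormat.RgbResolution640x480Fps30,
    colorPoints);

// colorPoints[i] is the color-frame (X, Y) that depth pixel i projects onto.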

I am using Microsoft's Kinect SDK 1.6.

private void EventAllFramesReady(Object Sender, AllFramesReadyEventArgs e)
{
    System.Drawing.Color color;

    Bitmap image = null;
    Bitmap depth = null;

    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
        {
            // color 
            image = new Bitmap(colorFrame.Width, colorFrame.Height);
            byte[] colorPixels = new byte[colorFrame.PixelDataLength];
            colorFrame.CopyPixelDataTo(colorPixels);

            //lock bitmap, and work with BitmapData (way faster than SetPixel())
            BitmapData imageBitmapData = image.LockBits(new Rectangle(0, 0, image.Width, image.Height),
                                                ImageLockMode.WriteOnly,
                                                image.PixelFormat);
            IntPtr IptrImage = imageBitmapData.Scan0;
            byte[] PixelsImage = new byte[image.Width * image.Height * 4];


            // depth
            depth = new Bitmap(depthFrame.Width, depthFrame.Height);
            DepthImagePixel[] depthData = new DepthImagePixel[depthFrame.PixelDataLength];
            depthFrame.CopyDepthImagePixelDataTo(depthData);

            //lock bitmap, and work with BitmapData (way faster than SetPixel())
            BitmapData depthBitmapData = depth.LockBits(new Rectangle(0, 0, depth.Width, depth.Height),
                                                ImageLockMode.WriteOnly,
                                                depth.PixelFormat);
            IntPtr IptrDepth = depthBitmapData.Scan0;
            byte[] PixelsDepth = new byte[depth.Width * depth.Height * 4];


            DepthImagePoint depthpoint = new DepthImagePoint();

            for (int x = 1; x < colorFrame.Width; x++)
            {
                for (int y = 1; y < colorFrame.Height; y++)
                {
                    int i = ((y * image.Width) + x) * 4;

                    short depthdistanceRAW = (depthData[x + y * depth.Width]).Depth;

                    // convert distance value into a color
                    color = System.Drawing.Color.Pink;
                    if (depthdistanceRAW > 0 && depthdistanceRAW <= 4000)
                    {
                        int depthdistance = (int)((depthdistanceRAW / 4090f) * 255f);
                        color = System.Drawing.Color.FromArgb((int)(depthdistance / 2f), depthdistance, (int)(depthdistance * 0.7f));
                    }

                    depthpoint.X = x;
                    depthpoint.Y = y;
                    depthpoint.Depth = depthdistanceRAW;

                    ColorImagePoint colorpoint = _Sensor.CoordinateMapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, depthpoint, ColorImageFormat.RgbResolution640x480Fps30);

                    //if (colorpoint.X > 0 && colorpoint.X <= 640 && colorpoint.Y > 0 && colorpoint.Y <= 480)
                    //{
                    int adjustedposition = ((colorpoint.Y * image.Width) + colorpoint.X) * 4;
                    //if (adjustedposition < depthData.Length)
                    //{
                    PixelsDepth[i] = color.B;
                    PixelsDepth[i + 1] = color.G;
                    PixelsDepth[i + 2] = color.R;
                    PixelsDepth[i + 3] = DepthTransparency;
                    //}
                    //}

                    PixelsImage[i] = colorPixels[i];
                    PixelsImage[i + 1] = colorPixels[i + 1];
                    PixelsImage[i + 2] = colorPixels[i + 2];
                    PixelsImage[i + 3] = 255;
                }
            }

            Marshal.Copy(PixelsImage, 0, IptrImage, PixelsImage.Length);
            image.UnlockBits(imageBitmapData);

            Marshal.Copy(PixelsDepth, 0, IptrDepth, PixelsDepth.Length);
            depth.UnlockBits(depthBitmapData);
        }
    }

    _kf.UpdateImage(image); // update the RGB picture in the form
    _kf.UpdateDepth(depth); // update the Depth picture in the form
}

Best answer

I posted an answer to a similar question here.

The Coordinate Mapping Basics-WPF C# Sample shows how to use this method. Below are the important parts of the code:

// Intermediate storage for the depth data received from the sensor
private DepthImagePixel[] depthPixels;
// Intermediate storage for the color data received from the camera
private byte[] colorPixels;
// Intermediate storage for the depth to color mapping
private ColorImagePoint[] colorCoordinates;
// Inverse scaling factor between color and depth
private int colorToDepthDivisor;
// Format we will use for the depth stream
private const DepthImageFormat DepthFormat = DepthImageFormat.Resolution320x240Fps30;
// Format we will use for the color stream
private const ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;

//...

// Initialization
this.colorCoordinates = new ColorImagePoint[this.sensor.DepthStream.FramePixelDataLength];
this.depthWidth = this.sensor.DepthStream.FrameWidth;
this.depthHeight = this.sensor.DepthStream.FrameHeight;
int colorWidth = this.sensor.ColorStream.FrameWidth;
int colorHeight = this.sensor.ColorStream.FrameHeight;
this.colorToDepthDivisor = colorWidth / this.depthWidth;
this.sensor.AllFramesReady += this.SensorAllFramesReady;

//...

private void SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    // in the middle of shutting down, so nothing to do
    if (null == this.sensor)
    {
        return;
    }

    bool depthReceived = false;
    bool colorReceived = false;

    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (null != depthFrame)
        {
            // Copy the pixel data from the image to a temporary array
            depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);

            depthReceived = true;
        }
    }

    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        if (null != colorFrame)
        {
            // Copy the pixel data from the image to a temporary array
            colorFrame.CopyPixelDataTo(this.colorPixels);

            colorReceived = true;
        }
    }

    if (true == depthReceived)
    {
        this.sensor.CoordinateMapper.MapDepthFrameToColorFrame(
            DepthFormat,
            this.depthPixels,
            ColorFormat,
            this.colorCoordinates);

        // ...

        // inside a loop over every depth pixel at coordinates (x, y):
        int depthIndex = x + (y * this.depthWidth);
        DepthImagePixel depthPixel = this.depthPixels[depthIndex];

        // the color coordinate this depth pixel was mapped to
        ColorImagePoint colorImagePoint = this.colorCoordinates[depthIndex];

        // scale the color coordinate down to the depth resolution
        int X = colorImagePoint.X / this.colorToDepthDivisor;
        int Y = colorImagePoint.Y / this.colorToDepthDivisor;

        // depthPixel is the depth value that belongs at (X, Y) in a
        // depth-resolution image aligned with the color frame
    }
}
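Applied to the code in the question, the important change is to draw each depth pixel at the position colorCoordinates gives you, rather than at its own (x, y). The following is only a rough, untested sketch: it reuses the question's variables depthData, PixelsDepth and DepthTransparency, assumes both streams at 640x480 (so colorToDepthDivisor is 1), and assumes colorCoordinates was filled by MapDepthFrameToColorFrame as shown above:

// Sketch: write the false-color depth overlay at the mapped color position.
for (int depthIndex = 0; depthIndex < depthData.Length; depthIndex++)
{
    ColorImagePoint p = colorCoordinates[depthIndex];

    // depth pixels without a valid mapping land outside the color frame
    if (p.X < 0 || p.X >= 640 || p.Y < 0 || p.Y >= 480)
        continue;

    short raw = depthData[depthIndex].Depth;
    if (raw <= 0 || raw > 4000)
        continue;

    // same false-color conversion as in the question
    int d = (int)((raw / 4090f) * 255f);

    int i = (p.Y * 640 + p.X) * 4;            // BGRA offset in the 640x480 overlay
    PixelsDepth[i]     = (byte)(d * 0.7f);    // B
    PixelsDepth[i + 1] = (byte)d;             // G
    PixelsDepth[i + 2] = (byte)(d / 2f);      // R
    PixelsDepth[i + 3] = DepthTransparency;   // A
}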

Regarding c# - Aligning a depth image with an RGB image, the original question can be found on Stack Overflow: https://stackoverflow.com/questions/12983019/
