c - 我们如何使用 AleaGpu 将多维数组复制到内核?

标签 c arrays aleagpu

我们如何将多维数组复制到 AleaGPU 的内核中? 我们如何在内核中使用多维数组进行开发?

Malloc 似乎不接受它?

double[,] inputs;
double[,] dInputs1 = Worker.Malloc(inputs);    // I get an error here
var dOutputs1 = Worker.Malloc<double>(inputs1.Length)
Worker.Launch(SquareKernel, lp, dOutputs1.Ptr, dInputs1.Ptr,  inputs.Length);  // dInputs1.Ptr causes an error

// NOTE(review): this is the question's non-working attempt. Per the answer
// below, Alea GPU (up to 2.2) does not support 2D arrays in device code, so
// the deviceptr<double[,]> parameter cannot work; the row/column index has
// to be flattened manually instead (see the accepted workaround below).
[AOTCompile]
static void SquareKernel(deviceptr<double> outputs, deviceptr<double[,]> inputs, int n)
{
    // Grid-stride loop: each thread starts at its global thread index and
    // advances by the total number of launched threads.
    var start = blockIdx.x * blockDim.x + threadIdx.x;
    var stride = gridDim.x * blockDim.x;
    for (var i = start; i < n; i += stride)
    {
        outputs[i] = inputs[i,0] * inputs[i,0];  
    }
}

最佳答案

Alea GPU 版本直到 2.2(目前最新)还不支持 malloc array2d,因此您必须在内核中自行按行和列展平索引。对于主机端,您可以制作一些扩展方法,以使用一些 CUDA 驱动程序 API P/Invoke(这些 P/Invoke 函数可从 Alea.CUDA.dll 获得)将固定的 .NET 数组传输到设备或从设备传输。

所以这是我写的一个快速解决方法:

using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using Alea.CUDA;
using Alea.CUDA.IL;
using NUnit.Framework;

namespace ConsoleApplication1
{
    /// <summary>
    /// Host-side helpers that move 2D .NET arrays to and from device memory.
    /// Alea GPU (up to 2.2) cannot allocate a 2D array on the device, so the
    /// data is stored flattened (row-major) and copied with raw CUDA driver
    /// API P/Invokes while the managed array is pinned.
    /// </summary>
    static class Extension
    {
        /// <summary>
        /// Allocates device memory sized for <paramref name="array2D"/> and
        /// copies its contents host-to-device in flattened row-major order.
        /// </summary>
        public static DeviceMemory<T> Malloc<T>(this Worker worker, T[,] array2D)
        {
            var rowCount = array2D.GetLength(0);
            var colCount = array2D.GetLength(1);
            var deviceMem = worker.Malloc<T>(rowCount * colCount);

            // Pin the managed array so the GC cannot move it while the raw
            // driver-API copy reads from its address.
            var pinned = GCHandle.Alloc(array2D, GCHandleType.Pinned);
            try
            {
                var hostAddr = pinned.AddrOfPinnedObject();
                var deviceAddr = deviceMem.Handle;
                var byteCount = new IntPtr(Intrinsic.__sizeof<T>() * rowCount * colCount);
                // EvalAction pushes the worker's CUDA context onto the current
                // thread before the driver-API call executes.
                worker.EvalAction(
                    () => CUDAInterop.cuSafeCall(
                        CUDAInterop.cuMemcpyHtoD(deviceAddr, hostAddr, byteCount)));
            }
            finally
            {
                pinned.Free();
            }

            return deviceMem;
        }

        /// <summary>
        /// Allocates uninitialized device memory large enough for a
        /// rows-by-cols matrix stored flat.
        /// </summary>
        public static DeviceMemory<T> Malloc<T>(this Worker worker, int rows, int cols)
        {
            var flatLength = rows * cols;
            return worker.Malloc<T>(flatLength);
        }

        /// <summary>
        /// Copies the flattened device buffer back into
        /// <paramref name="array2D"/> (device-to-host, row-major).
        /// </summary>
        public static void Gather<T>(this DeviceMemory<T> dmem, T[,] array2D)
        {
            var rowCount = array2D.GetLength(0);
            var colCount = array2D.GetLength(1);

            // Pin the destination array for the duration of the raw copy.
            var pinned = GCHandle.Alloc(array2D, GCHandleType.Pinned);
            try
            {
                var hostAddr = pinned.AddrOfPinnedObject();
                var deviceAddr = dmem.Handle;
                var byteCount = new IntPtr(Intrinsic.__sizeof<T>() * rowCount * colCount);
                // EvalAction ensures the owning worker's CUDA context is
                // current on this thread before the driver-API call runs.
                dmem.Worker.EvalAction(
                    () => CUDAInterop.cuSafeCall(
                        CUDAInterop.cuMemcpyDtoH(hostAddr, deviceAddr, byteCount)));
            }
            finally
            {
                pinned.Free();
            }
        }
    }

    class Program
    {
        /// <summary>Maps a (row, col) pair onto its row-major flat index.</summary>
        static int FlattenIndex(int row, int col, int cols)
        {
            return row * cols + col;
        }

        /// <summary>
        /// Demo kernel: copies the flattened 2D input buffer to the output
        /// buffer cell by cell using manual row/column index flattening.
        /// </summary>
        [AOTCompile]
        static void Kernel(deviceptr<double> outputs, deviceptr<double> inputs, int rows, int cols)
        {
            // Deliberately single-threaded: the kernel only demonstrates the
            // flattened indexing, so one thread walks every cell.
            for (var r = 0; r < rows; r++)
            {
                for (var c = 0; c < cols; c++)
                {
                    var idx = FlattenIndex(r, c, cols);
                    outputs[idx] = inputs[idx];
                }
            }
        }

        /// <summary>
        /// Round-trips a random 2D array host -> device -> kernel -> host and
        /// checks that nothing was lost or reordered.
        /// </summary>
        [Test]
        public static void Test()
        {
            // Kept small because the kernel runs in a single GPU thread.
            const int rows = 10;
            const int cols = 5;

            var rng = new Random();
            var inputs = new double[rows, cols];
            for (var r = 0; r < rows; ++r)
            {
                for (var c = 0; c < cols; ++c)
                {
                    inputs[r, c] = rng.Next(1, 100);
                }
            }

            var worker = Worker.Default;
            var dInputs = worker.Malloc(inputs);
            var dOutputs = worker.Malloc<double>(rows, cols);
            worker.Launch(Kernel, new LaunchParam(1, 1), dOutputs.Ptr, dInputs.Ptr, rows, cols);

            var outputs = new double[rows, cols];
            dOutputs.Gather(outputs);
            Assert.AreEqual(inputs, outputs);
        }

        public static void Main(string[] args)
        {

        }
    }
}

关于c - 我们如何使用 AleaGpu 将多维数组复制到内核?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/33154130/

相关文章:

javascript - 创建带有子数组的数组的函数

c# - 使用 Alea GPU 加速嵌套循环和按位运算

C# 处理空格

c - 将有符号的 char 数组传递给需要 const char 指针的函数会导致数组值出现乱码

c++ - 为什么我在播放 mp3 歌曲时收到错误提示 : bitstream problem, 重新同步跳过?

c - 你如何将值转换为 C 中的枚举常量?

f# - Floyd Warshall 使用 Alea GPU

c# - Alea GPU 教程未使用 FSharp.Core 4.4.0.0 在 VS 2015 Update 2 上编译

C 段错误打印缓冲区

c - 用 libgit2 实现 'git pull'?