opencl - Matrix-vector multiplication with OpenCL

Tags: opencl matrix-multiplication multiplication

I have two different programs.

The first one does matrix-matrix multiplication with OpenCL. On my GPU it performs much better than on the host CPU (for example, 0.2 seconds versus 18 seconds).

The second one does matrix-vector multiplication with OpenCL, and on the GPU it runs slightly slower than on the host CPU.

What is the reason?

Here is the kernel:

__kernel void matrixVectorMul(__global float* resultVector,
    __global float* matrixA,
    __global float* vectorB, 
    int width_A)
{
    // One work-item per output element: tx is the row index of matrixA.
    int tx = get_global_id(0); 

    // Dot product of row tx of matrixA with vectorB.
    float value = 0;
    for (unsigned int k = 0; k < width_A; ++k) {
        value += matrixA[tx * width_A + k] * vectorB[k];
    }

    resultVector[tx] = value;
}

And here is the host code:

#include <stdlib.h>
#define __CL_ENABLE_EXCEPTIONS
#include "cl.hpp"
#include <fstream>
#include <iostream>
#include <time.h>
#include <cmath>
#include <algorithm>   // std::fill_n
#include <cstdio>      // fopen, fprintf
#include <iterator>    // std::istreambuf_iterator
#include <string>
#include <vector>

#define LOCAL_SIZE 512
#define WIDTH_A (4096*2)
#define HEIGHT_A (4096*2)

float *matrix_A;
float *vector_B;
float *result_vector;
float *result_vector_host;

void randomInit(float *data, int size) {
    for (unsigned int i = 0; i < size; ++i)
        data[i] = rand() / (float)RAND_MAX;
}

void GenerateTestData() {
    srand((unsigned int)time(NULL));    

    unsigned int size_A = WIDTH_A * HEIGHT_A;
    matrix_A = new float[size_A];

    vector_B = new float[WIDTH_A];

    randomInit(matrix_A, size_A);
    randomInit(vector_B, WIDTH_A);

    result_vector = new float[WIDTH_A];
    result_vector_host = new float[WIDTH_A];
}

void PerformCalculationOnDevice(cl::Device device) {
    clock_t start_t, end_t;
    start_t = clock();
    std::vector<cl::Device> contextDevices;
    contextDevices.push_back(device);
    cl::Context context(contextDevices);

    cl::CommandQueue queue(context, device);

    std::fill_n(result_vector, WIDTH_A, 0);

    cl::Buffer cl_matrix_A = cl::Buffer(context, CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR, WIDTH_A * HEIGHT_A * sizeof(float), matrix_A);
    cl::Buffer cl_vector_B = cl::Buffer(context, CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR, WIDTH_A * sizeof(float), vector_B);
    cl::Buffer cl_result_vector = cl::Buffer(context, CL_MEM_WRITE_ONLY|CL_MEM_COPY_HOST_PTR, WIDTH_A * sizeof(float), result_vector);
    end_t = clock();
    std::cout << "Context, queue, buffers " << (float)(end_t - start_t) / CLOCKS_PER_SEC << std::endl;

    std::ifstream sourceFile("MatrixVectorMultiplicationKernel.cl");
    std::string sourceCode(std::istreambuf_iterator<char>(sourceFile),(std::istreambuf_iterator<char>()));

    cl::Program::Sources source(1, std::make_pair(sourceCode.c_str(), sourceCode.length()+1));
    cl::Program program = cl::Program(context, source);
    program.build(contextDevices);
    cl::Kernel kernel(program, "matrixVectorMul");

    int iArg = 0;
    kernel.setArg(iArg++, cl_result_vector);
    kernel.setArg(iArg++, cl_matrix_A);
    kernel.setArg(iArg++, cl_vector_B);
    kernel.setArg(iArg++, WIDTH_A);

    start_t = clock();
    // One work-item per output element: global size HEIGHT_A, work-group size LOCAL_SIZE.
    queue.enqueueNDRangeKernel(kernel, cl::NullRange, cl::NDRange(HEIGHT_A), cl::NDRange(LOCAL_SIZE));
    queue.finish();

    queue.enqueueReadBuffer(cl_result_vector, CL_TRUE, 0, WIDTH_A * sizeof(float), result_vector);
    end_t = clock();
    std::cout << "enqueueNDRangeKernel and enqueueReadBuffer " << (float)(end_t - start_t) / CLOCKS_PER_SEC << std::endl;
}

void PerformCalculationOnHost() {
    float tmp;
    for(int row_A = 0; row_A < HEIGHT_A; row_A++) {
        tmp = 0;
        for(int col_A = 0; col_A < WIDTH_A; col_A++) {
            tmp += matrix_A[row_A * WIDTH_A + col_A] * vector_B[col_A];
        }
        result_vector_host[row_A] = tmp;
    }
}

int main(int argc, char** argv) {
    GenerateTestData();

    std::vector<cl::Platform> platforms;
    cl::Platform::get(&platforms);
    std::vector<cl::Device> devices;
    clock_t start_t = clock();
    for (unsigned int iPlatform=0; iPlatform<platforms.size(); iPlatform++) {
        platforms[iPlatform].getDevices(CL_DEVICE_TYPE_ALL, &devices);
        for (unsigned int iDevice=0; iDevice<devices.size(); iDevice++) {
            try {
                PerformCalculationOnDevice(devices[iDevice]);
            } catch (cl::Error error) {
                std::cout << error.what() << "(" << error.err() << ")" << std::endl;   
            }
        }
    }
    clock_t end_t = clock();
    std::cout << "Device: " << (float)(end_t - start_t) / CLOCKS_PER_SEC << " seconds" << std::endl;
    start_t = clock();
    PerformCalculationOnHost();
    end_t = clock();
    std::cout << "Host: " << (float)(end_t - start_t) / CLOCKS_PER_SEC << " seconds" << std::endl;
    int errors = 0;
    float mean_deviation = 0;
    FILE *f, *f_host;
    f = fopen("device_result", "w");
    f_host = fopen("host_result", "w");
    for(int i = 0; i < WIDTH_A; i++) {
            if(fabs(result_vector[i] - result_vector_host[i]) > 1E-3) {
                errors++;
            }
            fprintf(f, "%.2f\n", result_vector[i]);
            fprintf(f_host, "%.2f\n", result_vector_host[i]);
            mean_deviation += fabs(result_vector[i] - result_vector_host[i]);
    }
    fclose(f); fclose(f_host);
    mean_deviation /= WIDTH_A;
    std::cout << "Errors = " << errors << std::endl;
    std::cout << "Mean deviation = " << mean_deviation << std::endl;

    delete[](matrix_A);
    delete[](vector_B);
    delete[](result_vector);
    delete[](result_vector_host);
    return 0;
}

When I run it, I get the following output:

Context, queue, buffers 0.45
enqueueNDRangeKernel and enqueueReadBuffer 1.31
Device: 1.79 seconds
Host: 1.42 seconds
Errors = 0
Mean deviation = 8.78572e-05

Best answer

To make the GPU efficient in this case, you need more work-items (one per output value is not enough), and the compute-to-memory-access ratio should be higher (that is, every value that is read should be reused as many times as possible).
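To illustrate the first point, here is a minimal sketch (not the answer's original code) of a kernel that assigns a whole work-group to each output element: every work-item accumulates a strided partial dot product, and a tree reduction in local memory combines the partial sums. The kernel name matrixVectorMulReduce and the GROUP_SIZE constant are assumptions made for this example; the arguments match the kernel above.

// Sketch: one work-group per row of matrixA.
// GROUP_SIZE must equal the local work size used when enqueueing the kernel.
#define GROUP_SIZE 128

__kernel void matrixVectorMulReduce(__global float* resultVector,
    __global const float* matrixA,
    __global const float* vectorB,
    int width_A)
{
    __local float partial[GROUP_SIZE];

    int row = get_group_id(0);   // one work-group per output element
    int lid = get_local_id(0);

    // Each work-item sums a strided slice of row 'row'.
    float sum = 0.0f;
    for (int k = lid; k < width_A; k += GROUP_SIZE)
        sum += matrixA[row * width_A + k] * vectorB[k];
    partial[lid] = sum;

    // Tree reduction in local memory.
    for (int stride = GROUP_SIZE / 2; stride > 0; stride >>= 1) {
        barrier(CLK_LOCAL_MEM_FENCE);
        if (lid < stride)
            partial[lid] += partial[lid + stride];
    }

    if (lid == 0)
        resultVector[row] = partial[0];
}

Launched with a global size of HEIGHT_A * GROUP_SIZE and a local size of GROUP_SIZE, this gives the GPU HEIGHT_A * GROUP_SIZE work-items instead of HEIGHT_A. Staging tiles of vectorB in local memory as well would further improve the compute-to-memory-access ratio.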

I wrote a few pages about this problem a while ago, in case you are interested: GPU matrix-vector product.

Regarding matrix-vector multiplication with OpenCL, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/15597299/
