c++ - How do you measure peak memory bandwidth in OpenGL?

Tags: c++ opengl glsl nvidia

To get an idea of what kind of speeds I should be expecting, I have been trying to benchmark transfers between global memory and shaders, rather than relying on GPU spec sheets. However, I can't get close to the theoretical maximum. In fact, I'm off by a factor of 50!

I'm using a GTX Titan X, which is said to have 336.5GB/s. Linux x64 driver 352.21.

I found a CUDA benchmark here which gives me ~240–250GB/s (more like what I'd expect).

I'm trying to match exactly what it does with my shaders. I've tried vertex shaders, compute shaders, accessing buffer objects via image_load_store and NV_shader_buffer_store, with floats, vec4s, loops inside the shader (with coalesced addressing within the work group), and various methods of timing. I'm stuck at ~7GB/s (see the update below).

Why is GL so slow? Am I doing something wrong, and if so, how should it be done instead?

Here is my MWE with three methods (1. vertex shader with image_load_store, 2. vertex shader with bindless graphics, 3. compute shader with bindless graphics):

//#include <windows.h>
#include <assert.h>
#include <stdio.h>
#include <memory.h>
#include <GL/glew.h>
#include <GL/glut.h>

const char* imageSource =
    "#version 440\n"
    "uniform layout(r32f) imageBuffer data;\n"
    "uniform float val;\n"
    "void main() {\n"
    "   imageStore(data, gl_VertexID, vec4(val, 0.0, 0.0, 0.0));\n"
    "   gl_Position = vec4(0.0);\n"
    "}\n";

const char* bindlessSource =
    "#version 440\n"
    "#extension GL_NV_gpu_shader5 : enable\n"
    "#extension GL_NV_shader_buffer_load : enable\n"
    "uniform float* data;\n"
    "uniform float val;\n"
    "void main() {\n"
    "   data[gl_VertexID] = val;\n"
    "   gl_Position = vec4(0.0);\n"
    "}\n";

const char* bindlessComputeSource =
    "#version 440\n"
    "#extension GL_NV_gpu_shader5 : enable\n"
    "#extension GL_NV_shader_buffer_load : enable\n"
    "layout(local_size_x = 256) in;\n"
    "uniform float* data;\n"
    "uniform float val;\n"
    "void main() {\n"
    "   data[gl_GlobalInvocationID.x] = val;\n"
    "}\n";

GLuint compile(GLenum type, const char* shaderSrc)
{
    GLuint shader = glCreateShader(type);
    glShaderSource(shader, 1, (const GLchar**)&shaderSrc, NULL);
    glCompileShader(shader);
    int success = 0;
    int loglen = 0;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &success);
    glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &loglen);
    GLchar* log = new GLchar[loglen];
    glGetShaderInfoLog(shader, loglen, &loglen, log);
    if (!success)
    {
        printf("%s\n", log);
        exit(0);
    }
    GLuint program = glCreateProgram();
    glAttachShader(program, shader);
    glLinkProgram(program);
    return program;
}

GLuint timerQueries[2];
void start()
{
    glGenQueries(2, timerQueries);
    glQueryCounter(timerQueries[0], GL_TIMESTAMP);
}

float stop()
{
    glMemoryBarrier(GL_ALL_BARRIER_BITS);
    GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glWaitSync(sync, 0, GL_TIMEOUT_IGNORED);
    glQueryCounter(timerQueries[1], GL_TIMESTAMP);
    GLint available = 0;
    while (!available) //sometimes gets stuck here for whatever reason
        glGetQueryObjectiv(timerQueries[1], GL_QUERY_RESULT_AVAILABLE, &available);
    GLuint64 a, b;
    glGetQueryObjectui64v(timerQueries[0], GL_QUERY_RESULT, &a);
    glGetQueryObjectui64v(timerQueries[1], GL_QUERY_RESULT, &b);
    glDeleteQueries(2, timerQueries);
    return b - a;
}

int main(int argc, char** argv)
{
    float* check;
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
    glutCreateWindow("test");
    glewInit();

    int bufferSize = 64 * 1024 * 1024; //64MB
    int loops = 500;

    glEnable(GL_RASTERIZER_DISCARD);

    float* dat = new float[bufferSize/sizeof(float)];
    memset(dat, 0, bufferSize);

    //create a buffer with data
    GLuint buffer;
    glGenBuffers(1, &buffer);
    glBindBuffer(GL_TEXTURE_BUFFER, buffer);
    glBufferData(GL_TEXTURE_BUFFER, bufferSize, NULL, GL_STATIC_DRAW);

    //get a bindless address
    GLuint64 address;
    glMakeBufferResidentNV(GL_TEXTURE_BUFFER, GL_READ_WRITE);
    glGetBufferParameterui64vNV(GL_TEXTURE_BUFFER, GL_BUFFER_GPU_ADDRESS_NV, &address);

    //make a texture alias for it
    GLuint bufferTexture;
    glGenTextures(1, &bufferTexture);
    glBindTexture(GL_TEXTURE_BUFFER, bufferTexture);
    glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, buffer);
    glBindImageTextureEXT(0, bufferTexture, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32F);

    //compile the shaders
    GLuint imageShader = compile(GL_VERTEX_SHADER, imageSource);
    GLuint bindlessShader = compile(GL_VERTEX_SHADER, bindlessSource);
    GLuint bindlessComputeShader = compile(GL_COMPUTE_SHADER, bindlessComputeSource);

    //warm-up and check values
    glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
    glUseProgram(imageShader);
    glUniform1i(glGetUniformLocation(imageShader, "data"), 0);
    glUniform1f(glGetUniformLocation(imageShader, "val"), 1.0f);
    glDrawArrays(GL_POINTS, 0, bufferSize/sizeof(float));
    glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
    //check = (float*)glMapBuffer(GL_TEXTURE_BUFFER, GL_READ_ONLY);
    //for (int i = 0; i < bufferSize/sizeof(float); ++i)
    //  assert(check[i] == 1.0f);
    //glUnmapBuffer(GL_TEXTURE_BUFFER);

    glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
    glUseProgram(bindlessShader);
    glProgramUniformui64NV(bindlessShader, glGetUniformLocation(bindlessShader, "data"), address);
    glUniform1f(glGetUniformLocation(bindlessShader, "val"), 1.0f);
    glDrawArrays(GL_POINTS, 0, bufferSize/sizeof(float));
    //glMemoryBarrier(GL_ALL_BARRIER_BITS); //this causes glDispatchCompute to segfault later, so don't uncomment
    //check = (float*)glMapBuffer(GL_TEXTURE_BUFFER, GL_READ_ONLY);
    //for (int i = 0; i < bufferSize/sizeof(float); ++i)
    //  assert(check[i] == 1.0f);
    //glUnmapBuffer(GL_TEXTURE_BUFFER);

    glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
    glUseProgram(bindlessComputeShader);
    glProgramUniformui64NV(bindlessComputeShader, glGetUniformLocation(bindlessComputeShader, "data"), address);
    glUniform1f(glGetUniformLocation(bindlessComputeShader, "val"), 1.0f);
    glDispatchCompute(bufferSize/(sizeof(float) * 256), 1, 1);
    glMemoryBarrier(GL_ALL_BARRIER_BITS);
    //check = (float*)glMapBuffer(GL_TEXTURE_BUFFER, GL_READ_ONLY);
    //for (int i = 0; i < bufferSize/sizeof(float); ++i)
    //  assert(check[i] == 1.0f); //glDispatchCompute doesn't actually write anything with bindless graphics
    //glUnmapBuffer(GL_TEXTURE_BUFFER);
    glFinish();

    //time image_load_store
    glUseProgram(imageShader);
    glUniform1i(glGetUniformLocation(imageShader, "data"), 0);
    glUniform1f(glGetUniformLocation(imageShader, "val"), 1.0f);
    start();
    for (int i = 0; i < loops; ++i)
        glDrawArrays(GL_POINTS, 0, bufferSize/sizeof(float));
    GLuint64 imageTime = stop();
    printf("image_load_store: %.2fGB/s\n", (float)((bufferSize * (double)loops) / imageTime));

    //time bindless
    glUseProgram(bindlessShader);
    glProgramUniformui64NV(bindlessShader, glGetUniformLocation(bindlessShader, "data"), address);
    glUniform1f(glGetUniformLocation(bindlessShader, "val"), 1.0f);
    start();
    for (int i = 0; i < loops; ++i)
        glDrawArrays(GL_POINTS, 0, bufferSize/sizeof(float));
    GLuint64 bindlessTime = stop();
    printf("bindless: %.2fGB/s\n", (float)((bufferSize * (double)loops) / bindlessTime));

    //time bindless in a compute shader
    glUseProgram(bindlessComputeShader);
    glProgramUniformui64NV(bindlessComputeShader, glGetUniformLocation(bindlessComputeShader, "data"), address);
    glUniform1f(glGetUniformLocation(bindlessComputeShader, "val"), 1.0f);
    start();
    for (int i = 0; i < loops; ++i)
        glDispatchCompute(bufferSize/(sizeof(float) * 256), 1, 1);
    GLuint64 bindlessComputeTime = stop();
    printf("bindless compute: %.2fGB/s\n", (float)((bufferSize * (double)loops) / bindlessComputeTime));
    assert(glGetError() == GL_NO_ERROR);
    return 0;
}

My output:

image_load_store: 6.66GB/s
bindless: 6.68GB/s
bindless compute: 6.65GB/s
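
For reference on reading these numbers: GL_TIMESTAMP queries return nanoseconds, so dividing bytes written by elapsed nanoseconds gives (decimal) GB/s directly, which is what the printf computes. A rough sanity check of the 6.66GB/s figure:

64 * 1024 * 1024 bytes * 500 iterations ≈ 33.55 GB written
33.55 GB / 6.66 GB/s ≈ 5.0 seconds of GPU time for the timed loop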

A few notes:

  1. The compute shader with bindless graphics appears not to write anything (the commented-out assert fails), or at least the data isn't retrieved with glMapBuffer, even though its speed matches the other methods. Using image_load_store in the compute shader works and gives the same speed as the vertex shaders (although I thought that was one permutation too many to post).
  2. Calling glMemoryBarrier(GL_ALL_BARRIER_BITS) before glDispatchCompute causes a crash in the driver.
  3. Commenting out the three glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW); calls used to check the output raises the speed of the first two tests to ~17GB/s, and the compute shader skyrockets to 292GB/s, which is much closer to what I want, but this can't be trusted because of point 1.
  4. Sometimes while (!available) hangs for ages (pressing ctrl-c when I get tired of waiting shows it's still looping); a possible workaround is sketched just below this list.
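
A minimal sketch of the workaround mentioned in note 4 (my own suggestion, not part of the original code): the spin on GL_QUERY_RESULT_AVAILABLE can wait forever if the queued commands are never actually submitted to the GPU, so flush explicitly and/or ask for GL_QUERY_RESULT, which blocks until the value is ready:

float stop()
{
    glMemoryBarrier(GL_ALL_BARRIER_BITS);
    glQueryCounter(timerQueries[1], GL_TIMESTAMP);
    glFlush(); //make sure the timestamp command is actually submitted to the GPU
    GLuint64 a, b;
    //GL_QUERY_RESULT blocks until the result is available, so no busy-wait loop is needed
    glGetQueryObjectui64v(timerQueries[0], GL_QUERY_RESULT, &a);
    glGetQueryObjectui64v(timerQueries[1], GL_QUERY_RESULT, &b);
    glDeleteQueries(2, timerQueries);
    return b - a;
}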

For reference, here is the CUDA code:

//http://www.ks.uiuc.edu/Research/vmd/doxygen/CUDABench_8cu-source.html

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>

#define CUERR { cudaError_t err; \
    if ((err = cudaGetLastError()) != cudaSuccess) { \
    printf("CUDA error: %s, %s line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
    return -1; }}

//
// GPU device global memory bandwidth benchmark
//
template <class T>
__global__ void gpuglobmemcpybw(T *dest, const T *src) {
    const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    dest[idx] = src[idx];
}

template <class T>
__global__ void gpuglobmemsetbw(T *dest, const T val) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    dest[idx] = val;
}

typedef float4 datatype;

static int cudaglobmembw(int cudadev, double *gpumemsetgbsec, double *gpumemcpygbsec) {
    int i;
    int len = 1 << 22; // one thread per data element
    int loops = 500;
    datatype *src, *dest;
    datatype val=make_float4(1.0f, 1.0f, 1.0f, 1.0f);

    // initialize to zero for starters
    float memsettime = 0.0f;
    float memcpytime = 0.0f;
    *gpumemsetgbsec = 0.0;
    *gpumemcpygbsec = 0.0;

    // attach to the selected device
    cudaError_t rc;
    rc = cudaSetDevice(cudadev);
    if (rc != cudaSuccess) {
        #if CUDART_VERSION >= 2010
        rc = cudaGetLastError(); // query last error and reset error state
        if (rc != cudaErrorSetOnActiveProcess)
        return -1; // abort and return an error
        #else
        cudaGetLastError(); // just ignore and reset error state, since older CUDA
        // revs don't have a cudaErrorSetOnActiveProcess enum
        #endif
    }

    cudaMalloc((void **) &src, sizeof(datatype)*len);
    CUERR
    cudaMalloc((void **) &dest, sizeof(datatype)*len);
    CUERR

    dim3 BSz(256, 1, 1);
    dim3 GSz(len / (BSz.x * BSz.y * BSz.z), 1, 1); 

    // do a warm-up pass
    gpuglobmemsetbw<datatype><<< GSz, BSz >>>(src, val);
    CUERR
    gpuglobmemsetbw<datatype><<< GSz, BSz >>>(dest, val);
    CUERR
    gpuglobmemcpybw<datatype><<< GSz, BSz >>>(dest, src);
    CUERR

    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    // execute the memset kernel
    cudaEventRecord(start, 0);
    for (i=0; i<loops; i++) {
        gpuglobmemsetbw<datatype><<< GSz, BSz >>>(dest, val);
    }
    CUERR
    cudaEventRecord(end, 0);
    CUERR
    cudaEventSynchronize(start);
    CUERR
    cudaEventSynchronize(end);
    CUERR
    cudaEventElapsedTime(&memsettime, start, end);
    CUERR

    // execute the memcpy kernel
    cudaEventRecord(start, 0);
    for (i=0; i<loops; i++) {
        gpuglobmemcpybw<datatype><<< GSz, BSz >>>(dest, src);
    }
    cudaEventRecord(end, 0);
    CUERR
    cudaEventSynchronize(start);
    CUERR
    cudaEventSynchronize(end);
    CUERR
    cudaEventElapsedTime(&memcpytime, start, end);
    CUERR

    cudaEventDestroy(start);
    CUERR
    cudaEventDestroy(end);
    CUERR

    *gpumemsetgbsec = (len * sizeof(datatype) / (1024.0 * 1024.0)) / (memsettime / loops);
    *gpumemcpygbsec = (2 * len * sizeof(datatype) / (1024.0 * 1024.0)) / (memcpytime / loops);
    cudaFree(dest);
    cudaFree(src);
    CUERR

    return 0;
}

int main()
{
    double a, b;
    cudaglobmembw(0, &a, &b);
    printf("%f %f\n", (float)a, (float)b);
    return 0;
}

Update:

It seems the buffer is being made non-resident by my glBufferData calls, which were there to check that the output was actually being written. Per the extension:

A buffer is also made non-resident implicitly as a result of being respecified via BufferData or being deleted.
...
BufferData is specified to "delete the existing data store", so the GPU address of that data should become invalid. The buffer is therefore made non-resident in the current context.

Presumably, OpenGL then streams the buffer object data in every frame and doesn't cache it in video memory. That explains why the compute shader failed the assert, although there's a slight anomaly in that bindless graphics in the vertex shader still worked when the buffer was not resident, but I'll ignore that for now. I don't know why a 64MB buffer object wouldn't be resident by default when 12GB is available (though perhaps it is after first use).

So after each call to glBufferData I make the buffer resident again and fetch the address in case it has changed:

glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
glMakeBufferResidentNV(GL_TEXTURE_BUFFER, GL_READ_WRITE);
glGetBufferParameterui64vNV(GL_TEXTURE_BUFFER, GL_BUFFER_GPU_ADDRESS_NV, &address);
assert(glIsBufferResidentNV(GL_TEXTURE_BUFFER)); //sanity check

I now get 270–290GB/s with the compute shader using either image_load_store or bindless graphics. Now my questions include:

  • Given that the buffer seems to be resident for each test and the compute shader is nice and fast, why is the vertex shader version still so slow?

  • Without the bindless graphics extension, how are regular OpenGL users supposed to put data into video memory (actually put, not idly suggest that the driver might like to)?

    I'm pretty sure I would have noticed this problem in real-world situations, and it's this contrived benchmark that hits a slow path, so how could I trick the driver into making a buffer object resident? Running a compute shader first doesn't change anything.

Best answer

You are asking the driver to read from your process memory, dat. This causes extensive cache coherency traffic. When the GPU reads that memory, it can't be sure that it is up to date; it might be sitting in the CPU cache, modified, and not yet written back to RAM. This forces the GPU to actually read from the CPU cache, which is far more expensive than bypassing the CPU and reading RAM. RAM is often idle during normal operation, because a modern CPU's cache hit rate is typically 95% to 99% and the caches are in constant use.

To get maximum performance, you need to let the driver allocate the memory. Ordinary memory your program uses, such as global variables and the heap, is allocated as writeback memory. Driver-allocated memory will usually be allocated as write-combining or uncacheable, which eliminates the coherency traffic.

Peak advertised bandwidth numbers can only be achieved without this cache coherency overhead.

To let the driver allocate the memory, use glBufferData with a nullptr for the data.
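
A minimal sketch of what that looks like, using the buffer from the question (the glBufferSubData upload is my own addition for the case where the contents must come from the CPU, not something the answer prescribes):

//let the driver allocate the data store itself; no client pointer is read here
glBindBuffer(GL_TEXTURE_BUFFER, buffer);
glBufferData(GL_TEXTURE_BUFFER, bufferSize, nullptr, GL_STATIC_DRAW);

//if the initial contents must come from the CPU, upload them once with
//glBufferSubData rather than re-specifying the data store with a client pointer
glBufferSubData(GL_TEXTURE_BUFFER, 0, bufferSize, dat);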

It isn't all roses, though, if you do manage to force the driver into a write-combining buffer in system memory: CPU reads of such addresses will be very slow. The CPU is optimized for sequential writes, but random writes cause the write-combining buffers to flush frequently, which hurts performance.
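
If the CPU does need to read results back (as in the commented-out assert checks), one option, offered here only as a suggestion, is to copy the buffer into ordinary client memory in a single call instead of reading element by element through a mapped pointer:

//copy the GPU-side buffer back into ordinary (writeback) client memory in one call
std::vector<float> readback(bufferSize / sizeof(float)); //requires <vector>
glBindBuffer(GL_TEXTURE_BUFFER, buffer);
glGetBufferSubData(GL_TEXTURE_BUFFER, 0, bufferSize, readback.data());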

This question, "How do you measure peak memory bandwidth in OpenGL?", originally appeared on Stack Overflow: https://stackoverflow.com/questions/31136429/
