cuda - Roofline model computed manually vs. Nsight Compute

Tags: cuda nsight nvprof nsight-compute roofline

I have a very simple vector-addition kernel written in CUDA. I want to compute the arithmetic intensity of this kernel as well as its GFLOP/s. The values I compute differ significantly from those shown in Nsight Compute's roofline analysis section.

Since I have a very simple C = A + B vector-addition kernel in which all three vectors have size N, I expect N arithmetic operations and 3 x N x 4 bytes (assuming sizeof(float) == 4) of memory traffic, giving an arithmetic intensity of roughly 0.083.
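
Written out, with one FADD per element and two 4-byte loads plus one 4-byte store per element:

$$\mathrm{AI} = \frac{N\ \mathrm{FLOP}}{3 \cdot N \cdot 4\ \mathrm{B}} = \frac{1}{12} \approx 0.083\ \mathrm{FLOP/byte}$$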

Furthermore, I expect the GFLOP/s value to be N x 1e-9 / kernel_time_in_seconds. The values I compute differ significantly from Nsight Compute's. I know that Nsight Compute lowers the clock frequency, but I expected the arithmetic intensity (operations per byte) to be the same, or at least roughly the same given that it instruments the code.
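
Plugging in the sample run shown further below (N = 200000, kernel time 0.014144 ms):

$$\frac{200000 \times 10^{-9}}{1.4144 \times 10^{-5}\,\mathrm{s}} \approx 14.14\ \mathrm{GFLOP/s}$$

which matches the printed value of 14.1403.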

My CUDA kernel looks like this:

#include <iostream>
#include <cuda_runtime.h>

#define N 200000

__global__ void vectorAdd(float* a, float* b, float* c)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
    {
        c[tid] = a[tid] + b[tid];
    }
}

int main()
{
    // Declare and initialize host vectors
    float* host_a = new float[N];
    float* host_b = new float[N];
    float* host_c = new float[N];
    for (int i = 0; i < N; ++i)
    {
        host_a[i] = i;
        host_b[i] = 2 * i;
    }

    // Declare and allocate device vectors
    float* dev_a, * dev_b, * dev_c;
    cudaMalloc((void**)&dev_a, N * sizeof(float));
    cudaMalloc((void**)&dev_b, N * sizeof(float));
    cudaMalloc((void**)&dev_c, N * sizeof(float));

    // Copy host vectors to device
    cudaMemcpy(dev_a, host_a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, N * sizeof(float), cudaMemcpyHostToDevice);

    // Define kernel launch configuration
    int blockSize, gridSize;
    cudaOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, vectorAdd, 0, N);

    // Start timer
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    // Launch kernel
    vectorAdd<<<gridSize, blockSize>>>(dev_a, dev_b, dev_c);

    // Stop timer and calculate execution duration
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    // Copy result from device to host
    cudaMemcpy(host_c, dev_c, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    // Print execution duration
    std::cout << "Kernel execution duration: " << milliseconds << " ms" << std::endl;

    int numFloatingPointOps = N;
    int numBytesAccessed = 3 * N * sizeof(float);
    float opsPerByte = static_cast<float>(numFloatingPointOps) / static_cast<float>(numBytesAccessed);

    std::cout << "Floating-point operations per byte: " << opsPerByte << std::endl;

    float executionTimeSeconds = milliseconds / 1e3;
    float numGFLOPs = static_cast<float>(numFloatingPointOps) / 1e9;
    float GFLOPs = numGFLOPs / executionTimeSeconds;

    std::cout << "GFLOP/s: " << GFLOPs << std::endl;

    // Cleanup
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] host_a;
    delete[] host_b;
    delete[] host_c;

    return 0;
}
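
A side note on the launch configuration above: cudaOccupancyMaxPotentialBlockSize writes the minimum grid size needed to reach maximum occupancy into its first argument, not a grid size that covers N elements, so the launch may leave part of the vectors unprocessed (the accepted answer below switches to a manually computed grid). A minimal sketch of the documented usage pattern:

int minGridSize = 0, blockSize = 0;
// Ask the occupancy API only for a good block size ...
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, vectorAdd, 0, 0);
// ... then derive the grid size that actually covers all N elements.
int gridSize = (N + blockSize - 1) / blockSize;
vectorAdd<<<gridSize, blockSize>>>(dev_a, dev_b, dev_c);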

Sample output on my machine:

Kernel execution duration: 0.014144 ms
Floating-point operations per byte: 0.0833333
GFLOP/s: 14.1403

Compiling and running/profiling is simply:

nvcc vectorAdd.cu
sudo env "PATH=$PATH" ncu -f -o vectorAdd_rep --set full ./a.out
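
If only the roofline section is of interest, profiling does not need the full section set. As an assumption (section names differ between Nsight Compute versions; list yours with ncu --list-sections), something along these lines collects just the roofline chart:

ncu -f -o vectorAdd_rep --section SpeedOfLight_RooflineChart ./a.out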

Nsight Compute says the arithmetic intensity is 0.12; I have a screenshot of it: [Roofline chart from Nsight Compute]

When I look at the instruction statistics, the number of global load (LDG) and store (STG) operations combined is three times the number of FADDs (the element-wise float additions). With 4 bytes per access that should again come out to 0.083, but it does not. What is the reason for the difference between the two arithmetic intensities? Am I doing something wrong? The other instructions I checked do not appear to be relevant to the arithmetic-intensity calculation.

I have added a screenshot of the instruction statistics: [Instruction Statistics]
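
To cross-check the chart against raw counters, the metrics the DRAM roofline is derived from can be queried directly. The metric names below are assumptions based on common Nsight Compute versions (verify with ncu --query-metrics):

ncu --metrics smsp__sass_thread_inst_executed_op_fadd_pred_on.sum,dram__bytes.sum ./a.out

Dividing the FADD count by the DRAM byte count gives the intensity Nsight plots against the DRAM roofline. Whenever the measured DRAM traffic is smaller than the requested 3 x N x 4 bytes, for example because part of a small working set is served from L2, this ratio comes out above 1/12.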

Best Answer

After updating the code following Jérôme Richard's suggestion, I was able to identify the problem. First, for small grid sizes the results obtained with Nsight Compute are not accurate. With enough elements, Nsight Compute's results are very close to mine.

Another important note: the profiled code runs at lower clock frequencies, so the theoretical bounds Nsight Compute determines (memory bandwidth and attainable peak FLOP/s) are both smaller than the values one can obtain through CUDA API calls. I could further verify that the achieved percentage of peak performance (relative to the arithmetic intensity) is very similar between my code and Nsight Compute. Here is the updated code:

#include <algorithm>
#include <cstdio>
#include <iostream>
#include <stdexcept>
#include <string>
#include <cuda_runtime.h>

constexpr size_t N = static_cast<size_t>(1e9 / static_cast<float>(sizeof(float)));

#define CHECK_ERR checkErr(__FILE__,__LINE__)

std::string PrevFile = "";
int PrevLine = 0;

void checkErr(const std::string &File, int Line) {
#ifndef NDEBUG
    cudaError_t Error = cudaGetLastError();
    if (Error != cudaSuccess) {
        std::cout << std::endl << File
                << ", line " << Line
                << ": " << cudaGetErrorString(Error)
                << " (" << Error << ")"
                << std::endl;

        if (PrevLine > 0)
            std::cout << "Previous CUDA call:" << std::endl
                      << PrevFile << ", line " << PrevLine << std::endl;
        throw;
    }
    PrevFile = File;
    PrevLine = Line;
#endif
}

__global__ void vectorAdd(float* a, float* b, float* c)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
    {
        c[tid] = a[tid] + b[tid];
    }
}

int main()
{
    // Declare and initialize host vectors
    float* host_a = new float[N];
    float* host_b = new float[N];
    float* host_c = new float[N];
    for (int i = 0; i < N; ++i)
    {
        host_a[i] = i;
        host_b[i] = 2 * i;
    }

    // Declare and allocate device vectors
    float* dev_a, * dev_b, * dev_c;
    cudaMalloc((void**)&dev_a, N * sizeof(float)); CHECK_ERR;
    cudaMalloc((void**)&dev_b, N * sizeof(float)); CHECK_ERR;
    cudaMalloc((void**)&dev_c, N * sizeof(float)); CHECK_ERR;

    // Copy host vectors to device
    cudaMemcpy(dev_a, host_a, N * sizeof(float), cudaMemcpyHostToDevice); CHECK_ERR;
    cudaMemcpy(dev_b, host_b, N * sizeof(float), cudaMemcpyHostToDevice); CHECK_ERR;

    // Define kernel launch configuration
    // Previous launch configuration via the occupancy API:
    // int blockSize, gridSize;
    // cudaOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, vectorAdd, 0, N); CHECK_ERR;
    // vectorAdd<<<gridSize, blockSize>>>(dev_a, dev_b, dev_c); CHECK_ERR;
    int blockSize = 256;
    int gridSize = (N + blockSize - 1) / blockSize;

    // Fire first kernel and discard
    vectorAdd<<<gridSize, blockSize>>>(dev_a, dev_b, dev_c); CHECK_ERR;
    cudaDeviceSynchronize();

    // Start timer
    cudaEvent_t start, stop;
    cudaEventCreate(&start); CHECK_ERR;
    cudaEventCreate(&stop); CHECK_ERR;
    cudaEventRecord(start); CHECK_ERR;

    // Launch kernel
    vectorAdd<<<gridSize, blockSize>>>(dev_a, dev_b, dev_c); CHECK_ERR;

    // Stop timer and calculate execution duration
    cudaEventRecord(stop); CHECK_ERR;
    cudaEventSynchronize(stop); CHECK_ERR;
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop); CHECK_ERR;

    // Copy result from device to host
    cudaMemcpy(host_c, dev_c, N * sizeof(float), cudaMemcpyDeviceToHost); CHECK_ERR;
    cudaDeviceSynchronize(); CHECK_ERR;

    for (int i = 0; i < N; ++i)
    {
        if (host_c[i] > 1.001f * (3.0f * static_cast<float>(i)) ||
            host_c[i] < 0.999f * (3.0f * static_cast<float>(i))){
            throw std::runtime_error("Results different from expected " + std::to_string(host_c[i]) + " != " + std::to_string(3.0f * static_cast<float>(i)));
        }
    }

    // Print execution duration
    std::cout << "Kernel execution duration: " << milliseconds << " ms" << std::endl;

    size_t numFloatingPointOps = N;
    size_t numBytesAccessed = 3 * N * sizeof(float);
    float opsPerByte = static_cast<float>(numFloatingPointOps) / static_cast<float>(numBytesAccessed);

    std::cout << "Floating-point operations per byte: " << opsPerByte << std::endl;

    float executionTimeSeconds = milliseconds / 1e3;
    float numGFLOPs = static_cast<float>(numFloatingPointOps) / 1e9;
    float GFLOPs = numGFLOPs / executionTimeSeconds;

    std::cout << "GFLOP/s: " << GFLOPs << std::endl;

    float peakMemoryBandwidthTheo = 176.032; // GB /s
    float peakGFLOPTheo  = 4329.47; // GFlop /s
    float peakGFLOPforIntensity = std::min(peakMemoryBandwidthTheo * opsPerByte, peakGFLOPTheo);

    float achievedPeak = (GFLOPs / peakGFLOPforIntensity) * 100.0f;
    char strAchievedPeak[16];
    std::snprintf(strAchievedPeak, sizeof strAchievedPeak, "%.2f", achievedPeak);
    std::cout << "Percentage of Peak Performance: " << strAchievedPeak << "%" << std::endl;

    float GBPerSecond = (static_cast<float>(numBytesAccessed) * 1e-9) / executionTimeSeconds;
    std::cout << "GB per Second: " << GBPerSecond << std::endl;

    // Cleanup
    cudaFree(dev_a); CHECK_ERR;
    cudaFree(dev_b); CHECK_ERR;
    cudaFree(dev_c); CHECK_ERR;
    delete[] host_a;
    delete[] host_b;
    delete[] host_c;

    return 0;
}

Sample output on my RTX 3050:

Kernel execution duration: 17.6701 ms
Floating-point operations per byte: 0.0833333
GFLOP/s: 14.1482
Percentage of Peak Performance: 96.45%
GB per Second: 169.778
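
The two hardcoded peaks (176.032 GB/s and 4329.47 GFLOP/s) are specific to this card and clock state. A minimal sketch of how comparable theoretical values can be queried through the CUDA API, assuming 128 FP32 cores per SM (true for Ampere, architecture-dependent in general); as noted above, these API-derived numbers will generally be higher than Nsight Compute's clock-locked ones:

#include <iostream>
#include <cuda_runtime.h>

int main()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);

    // Theoretical DRAM bandwidth: effective memory clock x bus width.
    // memoryClockRate is in kHz; the factor 2 accounts for DDR signaling.
    double gbPerSecond = 2.0 * prop.memoryClockRate * 1e3
                       * (prop.memoryBusWidth / 8.0) * 1e-9;

    // Peak FP32 throughput: SMs x FP32 lanes x 2 FLOP per FMA x core clock (kHz).
    const int fp32CoresPerSM = 128;   // assumption, valid for sm_86 (Ampere)
    double gflopsPerSecond = 2.0 * prop.multiProcessorCount * fp32CoresPerSM
                           * prop.clockRate * 1e3 * 1e-9;

    std::cout << "Theoretical bandwidth: " << gbPerSecond << " GB/s\n"
              << "Theoretical FP32 peak: " << gflopsPerSecond << " GFLOP/s\n";
    return 0;
}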

For "cuda - Roofline model computed manually vs. Nsight Compute", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/76673770/
