cuda - cub::DeviceRadixSort fails when specifying the end bit

Tags: cuda nvidia gpgpu thrust cub

I am using the CUB library's GPU radix sort to sort N 32-bit unsigned integers whose values all use only k of the 32 bits, starting from the least significant bit.

I therefore specify the bit subrange [begin_bit, end_bit) when calling cub::DeviceRadixSort::SortKeys, hoping to improve sorting performance. I am using the latest version of CUB (1.16.0).

However, when sorting 1 billion keys with certain bit ranges [begin_bit=0, end_bit=k) for k = {20, 19, 18}, SortKeys crashes (not deterministically, but almost always) with an illegal memory access error, e.g. ./cub_sort_test 1000000000 0 20.

I have tested this on Volta and Ampere NVIDIA GPUs with CUDA 11.4 and 11.2, respectively. Has anyone run into this before and/or knows a workaround? Here is a minimal, reproducible example:

// HOW TO BUILD: nvcc -O3 -std=c++17 -Xcompiler -fopenmp cub_sort_test.cu -o cub_sort_test
#include <cub/cub.cuh>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/system/cuda/experimental/pinned_allocator.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <omp.h>  // for omp_get_thread_num()
#include <parallel/algorithm>
#include <random>
#include <vector>

#define DEBUG

#ifdef DEBUG
#define CheckCudaError(instruction) \
  { AssertNoCudaError((instruction), __FILE__, __LINE__); }
#else
#define CheckCudaError(instruction) instruction
#endif

inline void AssertNoCudaError(cudaError_t error_code, const char* file, int line) {
  if (error_code != cudaSuccess) {
    std::cout << "Error: " << cudaGetErrorString(error_code) << " " << file << " " << line << "\n";
  }
}

template <typename T>
using PinnedHostVector = thrust::host_vector<T, thrust::system::cuda::experimental::pinned_allocator<T>>;

std::mt19937 SeedRandomGenerator(uint32_t distribution_seed) {
    const size_t seeds_bytes = sizeof(std::mt19937::result_type) * std::mt19937::state_size;
    const size_t seeds_length = seeds_bytes / sizeof(std::seed_seq::result_type);

    std::vector<std::seed_seq::result_type> seeds(seeds_length);
    std::generate(seeds.begin(), seeds.end(), [&]() {
        distribution_seed = (distribution_seed << 1) | (distribution_seed >> (-1 & 31));
        return distribution_seed;
    });
    std::seed_seq seed_sequence(seeds.begin(), seeds.end());

    return std::mt19937{seed_sequence};
}

int main(int argc, char* argv[]) {

    if (argc != 4) {
        std::cerr << "Usage: ./cub-sort-test <num_keys> <gpu_id> <bit_entropy>" << std::endl;
        return -1;
    }

    size_t num_keys = std::stoull(argv[1]);
    int gpu = std::stoi(argv[2]);
    size_t bit_entropy = std::stoi(argv[3]);

    cudaStream_t stream;
    CheckCudaError(cudaSetDevice(gpu));
    CheckCudaError(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));

    PinnedHostVector<uint32_t> keys(num_keys);

#pragma omp parallel num_threads(64)
    {
      uint32_t max = (1 << bit_entropy) - 1;
  
      if (bit_entropy == sizeof(uint32_t) * 8) {
        max = std::numeric_limits<uint32_t>::max();
      } else if (bit_entropy == 1) {
        max = 2;
      }
  
      std::mt19937 random_generator = SeedRandomGenerator(2147483647 + static_cast<size_t>(omp_get_thread_num()));
      std::uniform_real_distribution<double> uniform_dist(0, max);
  
#pragma omp for schedule(static)
      for (size_t i = 0; i < num_keys; ++i) {
        keys[i] = static_cast<uint32_t>(uniform_dist(random_generator));
      }
    }

    thrust::device_vector<uint32_t> device_vector(num_keys);
    thrust::copy(keys.begin(), keys.end(), device_vector.begin());

    CheckCudaError(cudaDeviceSynchronize());

    size_t num_temporary_bytes = 0;
    cub::DeviceRadixSort::SortKeys(
        NULL, num_temporary_bytes, thrust::raw_pointer_cast(device_vector.data()),
        thrust::raw_pointer_cast(device_vector.data()), num_keys, 0, bit_entropy + 1, stream); // bit subrange is [begin_bit, end_bit), thus bit_entropy + 1

    uint8_t* temporary_storage = nullptr;
    CheckCudaError(cudaMalloc(reinterpret_cast<void**>(&temporary_storage), num_temporary_bytes));

    cub::DeviceRadixSort::SortKeys(
    (void*)temporary_storage, num_temporary_bytes, thrust::raw_pointer_cast(device_vector.data()),
    thrust::raw_pointer_cast(device_vector.data()), num_keys, 0, bit_entropy + 1, stream);

    CheckCudaError(cudaStreamSynchronize(stream));

    thrust::copy(device_vector.begin(), device_vector.end(), keys.begin());

    CheckCudaError(cudaFree(temporary_storage));

    if (std::is_sorted(keys.begin(), keys.end()) == false) {
        std::cout << "Error: Sorting failed." << std::endl;
    }

    return 0;
}

Best Answer

The problem with your code is that you are not using SortKeys correctly. SortKeys does not work in place; you need to provide a separate output buffer for the sorted data.
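In isolation, the fix is just to give SortKeys distinct input and output pointers. A minimal sketch (d_in, d_out and end_bit are illustrative names for two separate device buffers of num_keys keys and the chosen end bit, not identifiers from the code below):

// Query the required temporary storage size (first call with null storage).
size_t num_temporary_bytes = 0;
cub::DeviceRadixSort::SortKeys(nullptr, num_temporary_bytes, d_in, d_out, num_keys, 0, end_bit, stream);

// Allocate the temporary storage, then sort from d_in into the separate buffer d_out.
void* temporary_storage = nullptr;
cudaMalloc(&temporary_storage, num_temporary_bytes);
cub::DeviceRadixSort::SortKeys(temporary_storage, num_temporary_bytes, d_in, d_out, num_keys, 0, end_bit, stream);

The complete corrected example below applies exactly this change by sorting into device_vector_sorted instead of back into device_vector.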

#include <cub/cub.cuh>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/system/cuda/experimental/pinned_allocator.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <omp.h>  // for omp_get_thread_num()
#include <parallel/algorithm>
#include <random>
#include <vector>

#define DEBUG

#ifdef DEBUG
#define CheckCudaError(instruction) \
  { AssertNoCudaError((instruction), __FILE__, __LINE__); }
#else
#define CheckCudaError(instruction) instruction
#endif

inline void AssertNoCudaError(cudaError_t error_code, const char* file, int line) {
  if (error_code != cudaSuccess) {
    std::cout << "Error: " << cudaGetErrorString(error_code) << " " << file << " " << line << "\n";
  }
}

template <typename T>
using PinnedHostVector = thrust::host_vector<T, thrust::system::cuda::experimental::pinned_allocator<T>>;

std::mt19937 SeedRandomGenerator(uint32_t distribution_seed) {
    const size_t seeds_bytes = sizeof(std::mt19937::result_type) * std::mt19937::state_size;
    const size_t seeds_length = seeds_bytes / sizeof(std::seed_seq::result_type);

    std::vector<std::seed_seq::result_type> seeds(seeds_length);
    std::generate(seeds.begin(), seeds.end(), [&]() {
        distribution_seed = (distribution_seed << 1) | (distribution_seed >> (-1 & 31));
        return distribution_seed;
    });
    std::seed_seq seed_sequence(seeds.begin(), seeds.end());

    return std::mt19937{seed_sequence};
}

int main(int argc, char* argv[]) {

    if (argc != 4) {
        std::cerr << "Usage: ./cub-sort-test <num_keys> <gpu_id> <bit_entropy>" << std::endl;
        return -1;
    }

    size_t num_keys = std::stoull(argv[1]);
    int gpu = std::stoi(argv[2]);
    size_t bit_entropy = std::stoi(argv[3]);

    cudaStream_t stream;
    CheckCudaError(cudaSetDevice(gpu));
    CheckCudaError(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));

    PinnedHostVector<uint32_t> keys(num_keys);

#pragma omp parallel num_threads(64)
    {
      uint32_t max = (1 << bit_entropy) - 1;
  
      if (bit_entropy == sizeof(uint32_t) * 8) {
        max = std::numeric_limits<uint32_t>::max();
      } else if (bit_entropy == 1) {
        max = 2;
      }
  
      std::mt19937 random_generator = SeedRandomGenerator(2147483647 + static_cast<size_t>(omp_get_thread_num()));
      std::uniform_real_distribution<double> uniform_dist(0, max);
  
#pragma omp for schedule(static)
      for (size_t i = 0; i < num_keys; ++i) {
        keys[i] = static_cast<uint32_t>(uniform_dist(random_generator));
      }
    }

    thrust::device_vector<uint32_t> device_vector(num_keys);
    thrust::copy(keys.begin(), keys.end(), device_vector.begin());

    thrust::device_vector<uint32_t> device_vector_sorted(num_keys);

    CheckCudaError(cudaDeviceSynchronize());

    size_t num_temporary_bytes = 0;
    cub::DeviceRadixSort::SortKeys(
        NULL, num_temporary_bytes, thrust::raw_pointer_cast(device_vector.data()),
        thrust::raw_pointer_cast(device_vector_sorted.data()), num_keys, 0, bit_entropy + 1, stream); // bit subrange is [begin_bit, end_bit), thus bit_entropy + 1

    uint8_t* temporary_storage = nullptr;
    CheckCudaError(cudaMalloc(reinterpret_cast<void**>(&temporary_storage), num_temporary_bytes));

    cub::DeviceRadixSort::SortKeys(
    (void*)temporary_storage, num_temporary_bytes, thrust::raw_pointer_cast(device_vector.data()),
    thrust::raw_pointer_cast(device_vector_sorted.data()), num_keys, 0, bit_entropy + 1, stream);

    CheckCudaError(cudaStreamSynchronize(stream));

    thrust::copy(device_vector_sorted.begin(), device_vector_sorted.end(), keys.begin());

    CheckCudaError(cudaFree(temporary_storage));

    if (std::is_sorted(keys.begin(), keys.end()) == false) {
        std::cout << "Error: Sorting failed." << std::endl;
    }

    return 0;
}

If the unsorted array is no longer needed after the sort and may be overwritten, I recommend the overload that takes a DoubleBuffer<Keys> to reduce memory usage. Otherwise a temporary key array is allocated internally, since the const Key* input must not be overwritten.
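For reference, a minimal sketch of that DoubleBuffer overload (assuming d_buf and d_alt_buf are illustrative names for two device allocations of num_keys keys each; they are not part of the code above):

// Wrap the two buffers; CUB may ping-pong between them across the radix passes.
cub::DoubleBuffer<uint32_t> d_keys(d_buf, d_alt_buf);

size_t num_temporary_bytes = 0;
cub::DeviceRadixSort::SortKeys(nullptr, num_temporary_bytes, d_keys, num_keys, 0, bit_entropy + 1, stream);

void* temporary_storage = nullptr;
cudaMalloc(&temporary_storage, num_temporary_bytes);
cub::DeviceRadixSort::SortKeys(temporary_storage, num_temporary_bytes, d_keys, num_keys, 0, bit_entropy + 1, stream);

// After the call, d_keys.Current() points at whichever buffer holds the sorted keys.
uint32_t* sorted_keys = d_keys.Current();

Because the result may end up in either buffer, the caller checks d_keys.Current() afterwards; in exchange, CUB does not need to allocate its own temporary key array inside the temporary storage.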

Regarding cuda - cub::DeviceRadixSort fails when specifying the end bit, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/71285448/
