[/
      Copyright Oliver Kowalke 2017.
      Distributed under the Boost Software License, Version 1.0.
      (See accompanying file LICENSE_1_0.txt or copy at
      http://www.boost.org/LICENSE_1_0.txt
]

[#cuda]
[section:cuda CUDA]

[@http://developer.nvidia.com/cuda-zone/ CUDA (Compute Unified Device Architecture)] is a platform for parallel
computing on NVIDIA GPUs. The CUDA application programming interface gives access to the
GPU's instruction set and computation resources (the execution of compute kernels).

[heading Synchronization with CUDA streams]

CUDA operations such as compute kernels or memory transfers (between host and
device) can be grouped and queued in CUDA streams; the queued operations are then
executed on the GPU. Boost.Fiber enables a fiber to sleep (suspend) until a CUDA
stream has completed its operations, and to resume the fiber once the stream has
finished. This lets an application run other fibers on the CPU in the meantime,
without spawning an additional OS thread.

    // each thread combines smoothed values of a and b into c
    __global__
    void kernel( int size, int * a, int * b, int * c) {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if ( idx < size) {
            int idx1 = (idx + 1) % 256;
            int idx2 = (idx + 2) % 256;
            float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
            float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
            c[idx] = (as + bs) / 2;
        }
    }

    boost::fibers::fiber f([]{
        cudaStream_t stream;
        cudaStreamCreate( & stream);
        int size = 1024 * 1024;
        int full_size = 20 * size;
        // page-locked host memory, required for asynchronous copies
        int * host_a, * host_b, * host_c;
        cudaHostAlloc( & host_a, full_size * sizeof( int), cudaHostAllocDefault);
        cudaHostAlloc( & host_b, full_size * sizeof( int), cudaHostAllocDefault);
        cudaHostAlloc( & host_c, full_size * sizeof( int), cudaHostAllocDefault);
        int * dev_a, * dev_b, * dev_c;
        cudaMalloc( & dev_a, size * sizeof( int) );
        cudaMalloc( & dev_b, size * sizeof( int) );
        cudaMalloc( & dev_c, size * sizeof( int) );
        std::minstd_rand generator;
        std::uniform_int_distribution<> distribution(1, 6);
        for ( int i = 0; i < full_size; ++i) {
            host_a[i] = distribution( generator);
            host_b[i] = distribution( generator);
        }
        // process the data chunk-wise: copy in, launch the kernel, copy out
        for ( int i = 0; i < full_size; i += size) {
            cudaMemcpyAsync( dev_a, host_a + i, size * sizeof( int), cudaMemcpyHostToDevice, stream);
            cudaMemcpyAsync( dev_b, host_b + i, size * sizeof( int), cudaMemcpyHostToDevice, stream);
            kernel<<< size / 256, 256, 0, stream >>>( size, dev_a, dev_b, dev_c);
            cudaMemcpyAsync( host_c + i, dev_c, size * sizeof( int), cudaMemcpyDeviceToHost, stream);
        }
        auto result = boost::fibers::cuda::waitfor_all( stream); // suspend fiber till CUDA stream has finished
        BOOST_ASSERT( stream == std::get< 0 >( result) );
        BOOST_ASSERT( cudaSuccess == std::get< 1 >( result) );
        std::cout << "f1: GPU computation finished" << std::endl;
        cudaFreeHost( host_a);
        cudaFreeHost( host_b);
        cudaFreeHost( host_c);
        cudaFree( dev_a);
        cudaFree( dev_b);
        cudaFree( dev_c);
        cudaStreamDestroy( stream);
    });
    f.join();

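While a fiber is suspended in waitfor_all(), the scheduler is free to resume
other fibers on the same OS thread. The following sketch (not taken from the
library's examples; the second fiber and its output are purely illustrative)
shows a CPU-bound fiber making progress while the first fiber waits for the GPU:

    #include <boost/fiber/cuda/waitfor.hpp>

    boost::fibers::fiber f1([]{
        cudaStream_t stream;
        cudaStreamCreate( & stream);
        // ... enqueue kernels and asynchronous copies on the stream, as above ...
        auto result = boost::fibers::cuda::waitfor_all( stream); // f1 suspends here
        BOOST_ASSERT( cudaSuccess == std::get< 1 >( result) );
        cudaStreamDestroy( stream);
    });
    boost::fibers::fiber f2([]{
        // resumed on the same OS thread while f1 is suspended
        std::cout << "f2: running while the GPU works" << std::endl;
    });
    f1.join();
    f2.join();
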
[heading Synopsis]

    #include <boost/fiber/cuda/waitfor.hpp>

    namespace boost {
    namespace fibers {
    namespace cuda {

    std::tuple< cudaStream_t, cudaError_t > waitfor_all( cudaStream_t st);
    std::vector< std::tuple< cudaStream_t, cudaError_t > > waitfor_all( cudaStream_t ... st);

    }}}

[ns_function_heading cuda..waitfor]

    #include <boost/fiber/cuda/waitfor.hpp>

    namespace boost {
    namespace fibers {
    namespace cuda {

    std::tuple< cudaStream_t, cudaError_t > waitfor_all( cudaStream_t st);
    std::vector< std::tuple< cudaStream_t, cudaError_t > > waitfor_all( cudaStream_t ... st);

    }}}

[variablelist
[[Effects:] [Suspends the active fiber until the CUDA stream has finished its operations.]]
[[Returns:] [a tuple of the stream and its CUDA status code; the overload taking several streams returns one such tuple per stream]]
]

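A minimal sketch of the overload taking several streams (the stream names s0 and
s1 are illustrative): the calling fiber suspends until all passed streams have
drained, and each element of the returned vector reports one stream's status.

    cudaStream_t s0, s1;
    cudaStreamCreate( & s0);
    cudaStreamCreate( & s1);
    // ... enqueue work on s0 and s1 ...
    auto results = boost::fibers::cuda::waitfor_all( s0, s1); // one tuple per stream
    for ( auto & r : results) {
        BOOST_ASSERT( cudaSuccess == std::get< 1 >( r) );
    }
    cudaStreamDestroy( s0);
    cudaStreamDestroy( s1);
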
[endsect]