single_stream.cu

// Copyright Oliver Kowalke 2017.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <random>
#include <tuple>

#include <cuda.h>

#include <boost/assert.hpp>
#include <boost/bind.hpp>
#include <boost/intrusive_ptr.hpp>

#include <boost/fiber/all.hpp>
#include <boost/fiber/cuda/waitfor.hpp>
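
// CUDA kernel: each GPU thread adds one pair of elements of the input vectors.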
__global__
void vector_add( int * a, int * b, int * c, int size) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if ( idx < size) {
        c[idx] = a[idx] + b[idx];
    }
}
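
// Two fibers share the main thread: f1 drives the GPU work and suspends in
// boost::fibers::cuda::waitfor_all(), while f2 keeps printing in the meantime,
// showing that only the calling fiber, not the whole thread, blocks on the GPU.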
int main() {
    try {
        bool done = false;
        boost::fibers::fiber f1([&done]{
            std::cout << "f1: entered" << std::endl;
            try {
                cudaStream_t stream;
                cudaStreamCreate( & stream);
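                // the host buffers hold all twenty chunks in pinned memory (which
                // cudaMemcpyAsync needs to run asynchronously); the device buffers
                // hold a single chunk at a time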
                int size = 1024 * 1024;
                int full_size = 20 * size;
                int * host_a, * host_b, * host_c;
                cudaHostAlloc( & host_a, full_size * sizeof( int), cudaHostAllocDefault);
                cudaHostAlloc( & host_b, full_size * sizeof( int), cudaHostAllocDefault);
                cudaHostAlloc( & host_c, full_size * sizeof( int), cudaHostAllocDefault);
                int * dev_a, * dev_b, * dev_c;
                cudaMalloc( & dev_a, size * sizeof( int) );
                cudaMalloc( & dev_b, size * sizeof( int) );
                cudaMalloc( & dev_c, size * sizeof( int) );
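                // fill both input arrays with random values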
                std::minstd_rand generator;
                std::uniform_int_distribution<> distribution(1, 6);
                for ( int i = 0; i < full_size; ++i) {
                    host_a[i] = distribution( generator);
                    host_b[i] = distribution( generator);
                }
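                // enqueue the work chunk by chunk on one stream: copy the inputs to
                // the device, launch the kernel, copy the result back; every call is
                // asynchronous, so the loop returns before the GPU has finished, and
                // the device buffers can be reused per chunk because operations on a
                // single stream execute in order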
                for ( int i = 0; i < full_size; i += size) {
                    cudaMemcpyAsync( dev_a, host_a + i, size * sizeof( int), cudaMemcpyHostToDevice, stream);
                    cudaMemcpyAsync( dev_b, host_b + i, size * sizeof( int), cudaMemcpyHostToDevice, stream);
                    vector_add<<< size / 256, 256, 0, stream >>>( dev_a, dev_b, dev_c, size);
                    cudaMemcpyAsync( host_c + i, dev_c, size * sizeof( int), cudaMemcpyDeviceToHost, stream);
                }
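                // suspend only this fiber until everything queued on the stream has
                // completed; waitfor_all() returns a tuple of the stream and its
                // cudaError_t status, and other fibers keep running in the meantime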
                auto result = boost::fibers::cuda::waitfor_all( stream);
                BOOST_ASSERT( stream == std::get< 0 >( result) );
                BOOST_ASSERT( cudaSuccess == std::get< 1 >( result) );
                std::cout << "f1: GPU computation finished" << std::endl;
                cudaFreeHost( host_a);
                cudaFreeHost( host_b);
                cudaFreeHost( host_c);
                cudaFree( dev_a);
                cudaFree( dev_b);
                cudaFree( dev_c);
                cudaStreamDestroy( stream);
                done = true;
            } catch ( std::exception const& ex) {
                std::cerr << "exception: " << ex.what() << std::endl;
            }
            std::cout << "f1: leaving" << std::endl;
        });
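        // f2 shares the thread with f1; it keeps getting scheduled (and printing)
        // while f1 is suspended inside waitfor_all()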
        boost::fibers::fiber f2([&done]{
            std::cout << "f2: entered" << std::endl;
            while ( ! done) {
                std::cout << "f2: sleeping" << std::endl;
                boost::this_fiber::sleep_for( std::chrono::milliseconds( 1 ) );
            }
            std::cout << "f2: leaving" << std::endl;
        });
        f1.join();
        f2.join();
        std::cout << "done." << std::endl;
        return EXIT_SUCCESS;
    } catch ( std::exception const& e) {
        std::cerr << "exception: " << e.what() << std::endl;
    } catch (...) {
        std::cerr << "unhandled exception" << std::endl;
    }
    return EXIT_FAILURE;
}