naive_monte_carlo.hpp 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456
  1. /*
  2. * Copyright Nick Thompson, 2018
  3. * Use, modification and distribution are subject to the
  4. * Boost Software License, Version 1.0. (See accompanying file
  5. * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. */
  7. #ifndef BOOST_MATH_QUADRATURE_NAIVE_MONTE_CARLO_HPP
  8. #define BOOST_MATH_QUADRATURE_NAIVE_MONTE_CARLO_HPP
  9. #include <sstream>
  10. #include <algorithm>
  11. #include <vector>
  12. #include <boost/atomic.hpp>
  13. #include <functional>
  14. #include <future>
  15. #include <thread>
  16. #include <initializer_list>
  17. #include <utility>
  18. #include <random>
  19. #include <chrono>
  20. #include <map>
  21. #include <boost/math/policies/error_handling.hpp>
  22. namespace boost { namespace math { namespace quadrature {
namespace detail {
// Classifies one integration interval by which of its endpoints are infinite.
// The integrand wrapper in naive_monte_carlo uses this tag to decide which
// variable transformation maps the unit-cube sample into the user's domain.
enum class limit_classification {FINITE,
LOWER_BOUND_INFINITE,
UPPER_BOUND_INFINITE,
DOUBLE_INFINITE};
}
  29. template<class Real, class F, class RandomNumberGenerator = std::mt19937_64, class Policy = boost::math::policies::policy<>>
  30. class naive_monte_carlo
  31. {
  32. public:
  33. naive_monte_carlo(const F& integrand,
  34. std::vector<std::pair<Real, Real>> const & bounds,
  35. Real error_goal,
  36. bool singular = true,
  37. uint64_t threads = std::thread::hardware_concurrency(),
  38. uint64_t seed = 0): m_num_threads{threads}, m_seed{seed}
  39. {
  40. using std::numeric_limits;
  41. using std::sqrt;
  42. uint64_t n = bounds.size();
  43. m_lbs.resize(n);
  44. m_dxs.resize(n);
  45. m_limit_types.resize(n);
  46. m_volume = 1;
  47. static const char* function = "boost::math::quadrature::naive_monte_carlo<%1%>";
  48. for (uint64_t i = 0; i < n; ++i)
  49. {
  50. if (bounds[i].second <= bounds[i].first)
  51. {
  52. boost::math::policies::raise_domain_error(function, "The upper bound is <= the lower bound.\n", bounds[i].second, Policy());
  53. return;
  54. }
  55. if (bounds[i].first == -numeric_limits<Real>::infinity())
  56. {
  57. if (bounds[i].second == numeric_limits<Real>::infinity())
  58. {
  59. m_limit_types[i] = detail::limit_classification::DOUBLE_INFINITE;
  60. }
  61. else
  62. {
  63. m_limit_types[i] = detail::limit_classification::LOWER_BOUND_INFINITE;
  64. // Ok ok this is bad to use the second bound as the lower limit and then reflect.
  65. m_lbs[i] = bounds[i].second;
  66. m_dxs[i] = numeric_limits<Real>::quiet_NaN();
  67. }
  68. }
  69. else if (bounds[i].second == numeric_limits<Real>::infinity())
  70. {
  71. m_limit_types[i] = detail::limit_classification::UPPER_BOUND_INFINITE;
  72. if (singular)
  73. {
  74. // I've found that it's easier to sample on a closed set and perturb the boundary
  75. // than to try to sample very close to the boundary.
  76. m_lbs[i] = std::nextafter(bounds[i].first, (std::numeric_limits<Real>::max)());
  77. }
  78. else
  79. {
  80. m_lbs[i] = bounds[i].first;
  81. }
  82. m_dxs[i] = numeric_limits<Real>::quiet_NaN();
  83. }
  84. else
  85. {
  86. m_limit_types[i] = detail::limit_classification::FINITE;
  87. if (singular)
  88. {
  89. if (bounds[i].first == 0)
  90. {
  91. m_lbs[i] = std::numeric_limits<Real>::epsilon();
  92. }
  93. else
  94. {
  95. m_lbs[i] = std::nextafter(bounds[i].first, (std::numeric_limits<Real>::max)());
  96. }
  97. m_dxs[i] = std::nextafter(bounds[i].second, std::numeric_limits<Real>::lowest()) - m_lbs[i];
  98. }
  99. else
  100. {
  101. m_lbs[i] = bounds[i].first;
  102. m_dxs[i] = bounds[i].second - bounds[i].first;
  103. }
  104. m_volume *= m_dxs[i];
  105. }
  106. }
  107. m_integrand = [this, &integrand](std::vector<Real> & x)->Real
  108. {
  109. Real coeff = m_volume;
  110. for (uint64_t i = 0; i < x.size(); ++i)
  111. {
  112. // Variable transformation are listed at:
  113. // https://en.wikipedia.org/wiki/Numerical_integration
  114. // However, we've made some changes to these so that we can evaluate on a compact domain.
  115. if (m_limit_types[i] == detail::limit_classification::FINITE)
  116. {
  117. x[i] = m_lbs[i] + x[i]*m_dxs[i];
  118. }
  119. else if (m_limit_types[i] == detail::limit_classification::UPPER_BOUND_INFINITE)
  120. {
  121. Real t = x[i];
  122. Real z = 1/(1 + numeric_limits<Real>::epsilon() - t);
  123. coeff *= (z*z)*(1 + numeric_limits<Real>::epsilon());
  124. x[i] = m_lbs[i] + t*z;
  125. }
  126. else if (m_limit_types[i] == detail::limit_classification::LOWER_BOUND_INFINITE)
  127. {
  128. Real t = x[i];
  129. Real z = 1/(t+sqrt((numeric_limits<Real>::min)()));
  130. coeff *= (z*z);
  131. x[i] = m_lbs[i] + (t-1)*z;
  132. }
  133. else
  134. {
  135. Real t1 = 1/(1+numeric_limits<Real>::epsilon() - x[i]);
  136. Real t2 = 1/(x[i]+numeric_limits<Real>::epsilon());
  137. x[i] = (2*x[i]-1)*t1*t2/4;
  138. coeff *= (t1*t1+t2*t2)/4;
  139. }
  140. }
  141. return coeff*integrand(x);
  142. };
  143. // If we don't do a single function call in the constructor,
  144. // we can't do a restart.
  145. std::vector<Real> x(m_lbs.size());
  146. // If the seed is zero, that tells us to choose a random seed for the user:
  147. if (seed == 0)
  148. {
  149. std::random_device rd;
  150. seed = rd();
  151. }
  152. RandomNumberGenerator gen(seed);
  153. Real inv_denom = 1/static_cast<Real>(((gen.max)()-(gen.min)()));
  154. m_num_threads = (std::max)(m_num_threads, (uint64_t) 1);
  155. m_thread_calls.reset(new boost::atomic<uint64_t>[threads]);
  156. m_thread_Ss.reset(new boost::atomic<Real>[threads]);
  157. m_thread_averages.reset(new boost::atomic<Real>[threads]);
  158. Real avg = 0;
  159. for (uint64_t i = 0; i < m_num_threads; ++i)
  160. {
  161. for (uint64_t j = 0; j < m_lbs.size(); ++j)
  162. {
  163. x[j] = (gen()-(gen.min)())*inv_denom;
  164. }
  165. Real y = m_integrand(x);
  166. m_thread_averages[i] = y; // relaxed store
  167. m_thread_calls[i] = 1;
  168. m_thread_Ss[i] = 0;
  169. avg += y;
  170. }
  171. avg /= m_num_threads;
  172. m_avg = avg; // relaxed store
  173. m_error_goal = error_goal; // relaxed store
  174. m_start = std::chrono::system_clock::now();
  175. m_done = false; // relaxed store
  176. m_total_calls = m_num_threads; // relaxed store
  177. m_variance = (numeric_limits<Real>::max)();
  178. }
  179. std::future<Real> integrate()
  180. {
  181. // Set done to false in case we wish to restart:
  182. m_done.store(false); // relaxed store, no worker threads yet
  183. m_start = std::chrono::system_clock::now();
  184. return std::async(std::launch::async,
  185. &naive_monte_carlo::m_integrate, this);
  186. }
  187. void cancel()
  188. {
  189. // If seed = 0 (meaning have the routine pick the seed), this leaves the seed the same.
  190. // If seed != 0, then the seed is changed, so a restart doesn't do the exact same thing.
  191. m_seed = m_seed*m_seed;
  192. m_done = true; // relaxed store, worker threads will get the message eventually
  193. // Make sure the error goal is infinite, because otherwise we'll loop when we do the final error goal check:
  194. m_error_goal = (std::numeric_limits<Real>::max)();
  195. }
  196. Real variance() const
  197. {
  198. return m_variance.load();
  199. }
  200. Real current_error_estimate() const
  201. {
  202. using std::sqrt;
  203. //
  204. // There is a bug here: m_variance and m_total_calls get updated asynchronously
  205. // and may be out of synch when we compute the error estimate, not sure if it matters though...
  206. //
  207. return sqrt(m_variance.load()/m_total_calls.load());
  208. }
  209. std::chrono::duration<Real> estimated_time_to_completion() const
  210. {
  211. auto now = std::chrono::system_clock::now();
  212. std::chrono::duration<Real> elapsed_seconds = now - m_start;
  213. Real r = this->current_error_estimate()/m_error_goal.load(); // relaxed load
  214. if (r*r <= 1) {
  215. return 0*elapsed_seconds;
  216. }
  217. return (r*r - 1)*elapsed_seconds;
  218. }
  219. void update_target_error(Real new_target_error)
  220. {
  221. m_error_goal = new_target_error; // relaxed store
  222. }
  223. Real progress() const
  224. {
  225. Real r = m_error_goal.load()/this->current_error_estimate(); // relaxed load
  226. if (r*r >= 1)
  227. {
  228. return 1;
  229. }
  230. return r*r;
  231. }
  232. Real current_estimate() const
  233. {
  234. return m_avg.load();
  235. }
  236. uint64_t calls() const
  237. {
  238. return m_total_calls.load(); // relaxed load
  239. }
  240. private:
  241. Real m_integrate()
  242. {
  243. uint64_t seed;
  244. // If the user tells us to pick a seed, pick a seed:
  245. if (m_seed == 0)
  246. {
  247. std::random_device rd;
  248. seed = rd();
  249. }
  250. else // use the seed we are given:
  251. {
  252. seed = m_seed;
  253. }
  254. RandomNumberGenerator gen(seed);
  255. int max_repeat_tries = 5;
  256. do{
  257. if (max_repeat_tries < 5)
  258. {
  259. m_done = false;
  260. #ifdef BOOST_NAIVE_MONTE_CARLO_DEBUG_FAILURES
  261. std::cout << "Failed to achieve required tolerance first time through..\n";
  262. std::cout << " variance = " << m_variance << std::endl;
  263. std::cout << " average = " << m_avg << std::endl;
  264. std::cout << " total calls = " << m_total_calls << std::endl;
  265. for (std::size_t i = 0; i < m_num_threads; ++i)
  266. std::cout << " thread_calls[" << i << "] = " << m_thread_calls[i] << std::endl;
  267. for (std::size_t i = 0; i < m_num_threads; ++i)
  268. std::cout << " thread_averages[" << i << "] = " << m_thread_averages[i] << std::endl;
  269. for (std::size_t i = 0; i < m_num_threads; ++i)
  270. std::cout << " thread_Ss[" << i << "] = " << m_thread_Ss[i] << std::endl;
  271. #endif
  272. }
  273. std::vector<std::thread> threads(m_num_threads);
  274. for (uint64_t i = 0; i < threads.size(); ++i)
  275. {
  276. threads[i] = std::thread(&naive_monte_carlo::m_thread_monte, this, i, gen());
  277. }
  278. do {
  279. std::this_thread::sleep_for(std::chrono::milliseconds(100));
  280. uint64_t total_calls = 0;
  281. for (uint64_t i = 0; i < m_num_threads; ++i)
  282. {
  283. uint64_t t_calls = m_thread_calls[i].load(boost::memory_order::consume);
  284. total_calls += t_calls;
  285. }
  286. Real variance = 0;
  287. Real avg = 0;
  288. for (uint64_t i = 0; i < m_num_threads; ++i)
  289. {
  290. uint64_t t_calls = m_thread_calls[i].load(boost::memory_order::consume);
  291. // Will this overflow? Not hard to remove . . .
  292. avg += m_thread_averages[i].load(boost::memory_order::relaxed)*((Real)t_calls / (Real)total_calls);
  293. variance += m_thread_Ss[i].load(boost::memory_order::relaxed);
  294. }
  295. m_avg.store(avg, boost::memory_order::release);
  296. m_variance.store(variance / (total_calls - 1), boost::memory_order::release);
  297. m_total_calls = total_calls; // relaxed store, it's just for user feedback
  298. // Allow cancellation:
  299. if (m_done) // relaxed load
  300. {
  301. break;
  302. }
  303. } while (m_total_calls < 2048 || this->current_error_estimate() > m_error_goal.load(boost::memory_order::consume));
  304. // Error bound met; signal the threads:
  305. m_done = true; // relaxed store, threads will get the message in the end
  306. std::for_each(threads.begin(), threads.end(),
  307. std::mem_fn(&std::thread::join));
  308. if (m_exception)
  309. {
  310. std::rethrow_exception(m_exception);
  311. }
  312. // Incorporate their work into the final estimate:
  313. uint64_t total_calls = 0;
  314. for (uint64_t i = 0; i < m_num_threads; ++i)
  315. {
  316. uint64_t t_calls = m_thread_calls[i].load(boost::memory_order::consume);
  317. total_calls += t_calls;
  318. }
  319. Real variance = 0;
  320. Real avg = 0;
  321. for (uint64_t i = 0; i < m_num_threads; ++i)
  322. {
  323. uint64_t t_calls = m_thread_calls[i].load(boost::memory_order::consume);
  324. // Averages weighted by the number of calls the thread made:
  325. avg += m_thread_averages[i].load(boost::memory_order::relaxed)*((Real)t_calls / (Real)total_calls);
  326. variance += m_thread_Ss[i].load(boost::memory_order::relaxed);
  327. }
  328. m_avg.store(avg, boost::memory_order::release);
  329. m_variance.store(variance / (total_calls - 1), boost::memory_order::release);
  330. m_total_calls = total_calls; // relaxed store, this is just user feedback
  331. // Sometimes, the master will observe the variance at a very "good" (or bad?) moment,
  332. // Then the threads proceed to find the variance is much greater by the time they hear the message to stop.
  333. // This *WOULD* make sure that the final error estimate is within the error bounds.
  334. }
  335. while ((--max_repeat_tries >= 0) && (this->current_error_estimate() > m_error_goal));
  336. return m_avg.load(boost::memory_order::consume);
  337. }
  338. void m_thread_monte(uint64_t thread_index, uint64_t seed)
  339. {
  340. using std::numeric_limits;
  341. try
  342. {
  343. std::vector<Real> x(m_lbs.size());
  344. RandomNumberGenerator gen(seed);
  345. Real inv_denom = (Real) 1/(Real)( (gen.max)() - (gen.min)() );
  346. Real M1 = m_thread_averages[thread_index].load(boost::memory_order::consume);
  347. Real S = m_thread_Ss[thread_index].load(boost::memory_order::consume);
  348. // Kahan summation is required or the value of the integrand will go on a random walk during long computations.
  349. // See the implementation discussion.
  350. // The idea is that the unstabilized additions have error sigma(f)/sqrt(N) + epsilon*N, which diverges faster than it converges!
  351. // Kahan summation turns this to sigma(f)/sqrt(N) + epsilon^2*N, and the random walk occurs on a timescale of 10^14 years (on current hardware)
  352. Real compensator = 0;
  353. uint64_t k = m_thread_calls[thread_index].load(boost::memory_order::consume);
  354. while (!m_done) // relaxed load
  355. {
  356. int j = 0;
  357. // If we don't have a certain number of calls before an update, we can easily terminate prematurely
  358. // because the variance estimate is way too low. This magic number is a reasonable compromise, as 1/sqrt(2048) = 0.02,
  359. // so it should recover 2 digits if the integrand isn't poorly behaved, and if it is, it should discover that before premature termination.
  360. // Of course if the user has 64 threads, then this number is probably excessive.
  361. int magic_calls_before_update = 2048;
  362. while (j++ < magic_calls_before_update)
  363. {
  364. for (uint64_t i = 0; i < m_lbs.size(); ++i)
  365. {
  366. x[i] = (gen() - (gen.min)())*inv_denom;
  367. }
  368. Real f = m_integrand(x);
  369. using std::isfinite;
  370. if (!isfinite(f))
  371. {
  372. // The call to m_integrand transform x, so this error message states the correct node.
  373. std::stringstream os;
  374. os << "Your integrand was evaluated at {";
  375. for (uint64_t i = 0; i < x.size() -1; ++i)
  376. {
  377. os << x[i] << ", ";
  378. }
  379. os << x[x.size() -1] << "}, and returned " << f << std::endl;
  380. static const char* function = "boost::math::quadrature::naive_monte_carlo<%1%>";
  381. boost::math::policies::raise_domain_error(function, os.str().c_str(), /*this is a dummy arg to make it compile*/ 7.2, Policy());
  382. }
  383. ++k;
  384. Real term = (f - M1)/k;
  385. Real y1 = term - compensator;
  386. Real M2 = M1 + y1;
  387. compensator = (M2 - M1) - y1;
  388. S += (f - M1)*(f - M2);
  389. M1 = M2;
  390. }
  391. m_thread_averages[thread_index].store(M1, boost::memory_order::release);
  392. m_thread_Ss[thread_index].store(S, boost::memory_order::release);
  393. m_thread_calls[thread_index].store(k, boost::memory_order::release);
  394. }
  395. }
  396. catch (...)
  397. {
  398. // Signal the other threads that the computation is ruined:
  399. m_done = true; // relaxed store
  400. m_exception = std::current_exception();
  401. }
  402. }
  403. std::function<Real(std::vector<Real> &)> m_integrand;
  404. uint64_t m_num_threads;
  405. uint64_t m_seed;
  406. boost::atomic<Real> m_error_goal;
  407. boost::atomic<bool> m_done;
  408. std::vector<Real> m_lbs;
  409. std::vector<Real> m_dxs;
  410. std::vector<detail::limit_classification> m_limit_types;
  411. Real m_volume;
  412. boost::atomic<uint64_t> m_total_calls;
  413. // I wanted these to be vectors rather than maps,
  414. // but you can't resize a vector of atomics.
  415. std::unique_ptr<boost::atomic<uint64_t>[]> m_thread_calls;
  416. boost::atomic<Real> m_variance;
  417. std::unique_ptr<boost::atomic<Real>[]> m_thread_Ss;
  418. boost::atomic<Real> m_avg;
  419. std::unique_ptr<boost::atomic<Real>[]> m_thread_averages;
  420. std::chrono::time_point<std::chrono::system_clock> m_start;
  421. std::exception_ptr m_exception;
  422. };
  423. }}}
  424. #endif