//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP
#define BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/config.hpp>
#include <boost/asio/detail/concurrency_hint.hpp>
#include <boost/asio/detail/event.hpp>
#include <boost/asio/detail/limits.hpp>
#include <boost/asio/detail/reactor.hpp>
#include <boost/asio/detail/scheduler.hpp>
#include <boost/asio/detail/scheduler_thread_info.hpp>
#include <boost/asio/detail/signal_blocker.hpp>

#include <boost/asio/detail/push_options.hpp>

namespace boost {
namespace asio {
namespace detail {

// Wrapper functor used to run the scheduler from its own internal thread
// (when the scheduler is constructed with own_thread == true).
class scheduler::thread_function
{
public:
  explicit thread_function(scheduler* s)
    : this_(s)
  {
  }

  void operator()()
  {
    boost::system::error_code ec;
    this_->run(ec);
  }

private:
  scheduler* this_;
};
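
// The two cleanup structs below follow the scope-guard idiom: an instance is
// placed on the stack around a call that may throw, and its destructor
// restores the scheduler's invariants on every exit path. A minimal sketch of
// the pattern (illustrative only, not part of this file):
//
//   struct guard { ~guard() { /* restore invariants */ } };
//   {
//     guard g = { /* ... */ };
//     may_throw();
//   } // ~guard runs whether or not may_throw() threw
//
// task_cleanup runs after the reactor task: it folds the thread-private work
// count into the shared outstanding_work_ counter, enqueues any operations
// the task produced, and reinserts the task marker at the back of the queue.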
struct scheduler::task_cleanup
{
  ~task_cleanup()
  {
    if (this_thread_->private_outstanding_work > 0)
    {
      boost::asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work);
    }
    this_thread_->private_outstanding_work = 0;

    // Enqueue the completed operations and reinsert the task at the end of
    // the operation queue.
    lock_->lock();
    scheduler_->task_interrupted_ = true;
    scheduler_->op_queue_.push(this_thread_->private_op_queue);
    scheduler_->op_queue_.push(&scheduler_->task_operation_);
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};
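
// work_cleanup runs after a handler completes. The handler that was just
// consumed accounts for exactly one unit of work, so a private count above
// one means new work was posted during the handler (publish the surplus),
// while a count below one means the net work total dropped (notify
// work_finished(), which may stop the scheduler).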
struct scheduler::work_cleanup
{
  ~work_cleanup()
  {
    if (this_thread_->private_outstanding_work > 1)
    {
      boost::asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work - 1);
    }
    else if (this_thread_->private_outstanding_work < 1)
    {
      scheduler_->work_finished();
    }
    this_thread_->private_outstanding_work = 0;

#if defined(BOOST_ASIO_HAS_THREADS)
    if (!this_thread_->private_op_queue.empty())
    {
      lock_->lock();
      scheduler_->op_queue_.push(this_thread_->private_op_queue);
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};
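
// The concurrency hint determines how much locking the scheduler performs:
// one_thread_ is set when the hint promises a single thread (or disables
// scheduler/reactor locking), enabling the thread-private fast paths below.
// A typical route into this code (illustrative sketch; the wiring lives in
// io_context, which owns a scheduler as its service):
//
//   boost::asio::io_context io(1);      // concurrency hint of 1
//   boost::asio::post(io, []{ /* ... */ });
//   io.run();                           // ends up in scheduler::run()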
scheduler::scheduler(boost::asio::execution_context& ctx,
    int concurrency_hint, bool own_thread)
  : boost::asio::detail::execution_context_service_base<scheduler>(ctx),
    one_thread_(concurrency_hint == 1
        || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)
        || !BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          REACTOR_IO, concurrency_hint)),
    mutex_(BOOST_ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)),
    task_(0),
    task_interrupted_(true),
    outstanding_work_(0),
    stopped_(false),
    shutdown_(false),
    concurrency_hint_(concurrency_hint),
    thread_(0)
{
  BOOST_ASIO_HANDLER_TRACKING_INIT;

  if (own_thread)
  {
    ++outstanding_work_;
    boost::asio::detail::signal_blocker sb;
    thread_ = new boost::asio::detail::thread(thread_function(this));
  }
}

scheduler::~scheduler()
{
  if (thread_)
  {
    thread_->join();
    delete thread_;
  }
}

void scheduler::shutdown()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  if (thread_)
    stop_all_threads(lock);
  lock.unlock();

  // Join thread to ensure task operation is returned to queue.
  if (thread_)
  {
    thread_->join();
    delete thread_;
    thread_ = 0;
  }

  // Destroy handler objects.
  while (!op_queue_.empty())
  {
    operation* o = op_queue_.front();
    op_queue_.pop();
    if (o != &task_operation_)
      o->destroy();
  }

  // Reset to initial state.
  task_ = 0;
}

void scheduler::init_task()
{
  mutex::scoped_lock lock(mutex_);
  if (!shutdown_ && !task_)
  {
    task_ = &use_service<reactor>(this->context());
    op_queue_.push(&task_operation_);
    wake_one_thread_and_unlock(lock);
  }
}
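
// run() executes handlers until the scheduler runs out of work or is
// stopped, returning the number of handlers executed (saturating at the
// maximum value of std::size_t rather than overflowing). A hypothetical
// caller, for illustration:
//
//   boost::system::error_code ec;
//   std::size_t n = sched.run(ec); // blocks until no work remains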
std::size_t scheduler::run(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  std::size_t n = 0;
  for (; do_run_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::run_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_run_one(lock, this_thread, ec);
}
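
// wait_one() runs at most one handler, blocking for up to usec microseconds
// if none is immediately ready. For illustration (this is a detail-level
// interface, normally reached through io_context::run_one_for):
//
//   boost::system::error_code ec;
//   std::size_t n = sched.wait_one(100000, ec); // wait up to 100ms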
std::size_t scheduler::wait_one(long usec, boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_wait_one(lock, this_thread, usec, ec);
}
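
// poll() and poll_one() execute only handlers that are already ready,
// without blocking. Because a handler may itself call poll() on the same
// scheduler, any operations parked on the enclosing call's thread-private
// queue are first moved to the shared queue so the nested call can see them.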
std::size_t scheduler::poll(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  std::size_t n = 0;
  for (; do_poll_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::poll_one(boost::system::error_code& ec)
{
  ec = boost::system::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(BOOST_ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(BOOST_ASIO_HAS_THREADS)

  return do_poll_one(lock, this_thread, ec);
}

void scheduler::stop()
{
  mutex::scoped_lock lock(mutex_);
  stop_all_threads(lock);
}

bool scheduler::stopped() const
{
  mutex::scoped_lock lock(mutex_);
  return stopped_;
}

void scheduler::restart()
{
  mutex::scoped_lock lock(mutex_);
  stopped_ = false;
}

void scheduler::compensating_work_started()
{
  thread_info_base* this_thread = thread_call_stack::contains(this);
  ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
}
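
// Fast path for posting: when only one thread can be running the scheduler,
// or the operation continues work already running on this thread, the
// operation can go on the thread-private queue without taking the mutex or
// signalling another thread.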
void scheduler::post_immediate_completion(
    scheduler::operation* op, bool is_continuation)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#else // defined(BOOST_ASIO_HAS_THREADS)
  (void)is_continuation;
#endif // defined(BOOST_ASIO_HAS_THREADS)

  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(BOOST_ASIO_HAS_THREADS)
  if (one_thread_)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#endif // defined(BOOST_ASIO_HAS_THREADS)

  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_deferred_completions(
    op_queue<scheduler::operation>& ops)
{
  if (!ops.empty())
  {
#if defined(BOOST_ASIO_HAS_THREADS)
    if (one_thread_)
    {
      if (thread_info_base* this_thread = thread_call_stack::contains(this))
      {
        static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
        return;
      }
    }
#endif // defined(BOOST_ASIO_HAS_THREADS)

    mutex::scoped_lock lock(mutex_);
    op_queue_.push(ops);
    wake_one_thread_and_unlock(lock);
  }
}

void scheduler::do_dispatch(
    scheduler::operation* op)
{
  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}
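
// Transferring abandoned operations to a local queue destroys them without
// invoking them: op_queue's destructor calls destroy() on any operations
// still in the queue when it goes out of scope.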
void scheduler::abandon_operations(
    op_queue<scheduler::operation>& ops)
{
  op_queue<scheduler::operation> ops2;
  ops2.push(ops);
}
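
// do_run_one() is the heart of the scheduler. The shared queue holds both
// ordinary handlers and a sentinel, task_operation_, that marks where the
// reactor task should run. Popping the sentinel means "run the reactor";
// popping anything else means "complete this handler". The lock is always
// released before running either, so handlers execute without the mutex held.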
std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  while (!stopped_)
  {
    if (!op_queue_.empty())
    {
      // Prepare to execute first handler from queue.
      operation* o = op_queue_.front();
      op_queue_.pop();
      bool more_handlers = (!op_queue_.empty());

      if (o == &task_operation_)
      {
        task_interrupted_ = more_handlers;

        if (more_handlers && !one_thread_)
          wakeup_event_.unlock_and_signal_one(lock);
        else
          lock.unlock();

        task_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Run the task. May throw an exception. Only block if the operation
        // queue is empty and we're not polling, otherwise we want to return
        // as soon as possible.
        task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
      }
      else
      {
        std::size_t task_result = o->task_result_;

        if (more_handlers && !one_thread_)
          wake_one_thread_and_unlock(lock);
        else
          lock.unlock();

        // Ensure the count of outstanding work is decremented on block exit.
        work_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Complete the operation. May throw an exception. Deletes the object.
        o->complete(this, ec, task_result);

        return 1;
      }
    }
    else
    {
      wakeup_event_.clear(lock);
      wakeup_event_.wait(lock);
    }
  }

  return 0;
}
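
// do_wait_one() is the same dispatch logic with a bounded wait: the usec
// budget is consumed at most once, either waiting on the wakeup event or
// blocking inside the reactor, never both.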
std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread, long usec,
    const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == 0)
  {
    wakeup_event_.clear(lock);
    wakeup_event_.wait_for_usec(lock, usec);
    usec = 0; // Wait at most once.
    o = op_queue_.front();
  }

  if (o == &task_operation_)
  {
    op_queue_.pop();
    bool more_handlers = (!op_queue_.empty());

    task_interrupted_ = more_handlers;

    if (more_handlers && !one_thread_)
      wakeup_event_.unlock_and_signal_one(lock);
    else
      lock.unlock();

    {
      task_cleanup on_exit = { this, &lock, &this_thread };
      (void)on_exit;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      if (!one_thread_)
        wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);

  return 1;
}

std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const boost::system::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == &task_operation_)
  {
    op_queue_.pop();
    lock.unlock();

    {
      task_cleanup c = { this, &lock, &this_thread };
      (void)c;

      // Run the task. May throw an exception. We're polling, so do not
      // block: a timeout of 0 makes the reactor return immediately.
      task_->run(0, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);

  return 1;
}

void scheduler::stop_all_threads(
    mutex::scoped_lock& lock)
{
  stopped_ = true;
  wakeup_event_.signal_all(lock);

  if (!task_interrupted_ && task_)
  {
    task_interrupted_ = true;
    task_->interrupt();
  }
}
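
// A thread blocked in this scheduler is waiting in one of two places: on
// wakeup_event_, or inside the reactor task. Try the event first; if no
// thread was waiting there, interrupt the reactor instead so that the task
// returns and requeues itself.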
void scheduler::wake_one_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
  {
    if (!task_interrupted_ && task_)
    {
      task_interrupted_ = true;
      task_->interrupt();
    }
    lock.unlock();
  }
}

} // namespace detail
} // namespace asio
} // namespace boost

#include <boost/asio/detail/pop_options.hpp>

#endif // BOOST_ASIO_DETAIL_IMPL_SCHEDULER_IPP