spsc_queue.hpp 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028
  1. // lock-free single-producer/single-consumer ringbuffer
  2. // this algorithm is implemented in various projects (linux kernel)
  3. //
  4. // Copyright (C) 2009-2013 Tim Blechmann
  5. //
  6. // Distributed under the Boost Software License, Version 1.0. (See
  7. // accompanying file LICENSE_1_0.txt or copy at
  8. // http://www.boost.org/LICENSE_1_0.txt)
  9. #ifndef BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED
  10. #define BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED
  11. #include <algorithm>
  12. #include <memory>
  13. #include <boost/aligned_storage.hpp>
  14. #include <boost/assert.hpp>
  15. #include <boost/static_assert.hpp>
  16. #include <boost/utility.hpp>
  17. #include <boost/next_prior.hpp>
  18. #include <boost/utility/enable_if.hpp>
  19. #include <boost/config.hpp> // for BOOST_LIKELY
  20. #include <boost/type_traits/has_trivial_destructor.hpp>
  21. #include <boost/type_traits/is_convertible.hpp>
  22. #include <boost/lockfree/detail/allocator_rebind_helper.hpp>
  23. #include <boost/lockfree/detail/atomic.hpp>
  24. #include <boost/lockfree/detail/copy_payload.hpp>
  25. #include <boost/lockfree/detail/parameter.hpp>
  26. #include <boost/lockfree/detail/prefix.hpp>
  27. #include <boost/lockfree/lockfree_forward.hpp>
  28. #ifdef BOOST_HAS_PRAGMA_ONCE
  29. #pragma once
  30. #endif
namespace boost {
namespace lockfree {
namespace detail {

// Boost.Parameter signature for the ringbuffer/spsc_queue: the queue accepts
// the optional capacity<> and allocator<> policies, in either order.
typedef parameter::parameters<boost::parameter::optional<tag::capacity>,
                              boost::parameter::optional<tag::allocator>
                             > ringbuffer_signature;
template <typename T>
class ringbuffer_base
{
#ifndef BOOST_DOXYGEN_INVOKED
protected:
    typedef std::size_t size_t;
    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(size_t);
    // Invariant: the queue is empty when write_index_ == read_index_ and full
    // when next_index(write_index_) == read_index_, i.e. one slot of the
    // underlying array is always kept unused.  write_index_ is only ever
    // stored to by the producer thread, read_index_ only by the consumer.
    atomic<size_t> write_index_;
    char padding1[padding_size]; /* force read_index and write_index to different cache lines */
    atomic<size_t> read_index_;

    BOOST_DELETED_FUNCTION(ringbuffer_base(ringbuffer_base const&))
    BOOST_DELETED_FUNCTION(ringbuffer_base& operator= (ringbuffer_base const&))

protected:
    ringbuffer_base(void):
        write_index_(0), read_index_(0)
    {}

    // Advance arg by one slot, wrapping to 0 at max_size.  The subtraction
    // loop replaces a modulo; the index can exceed max_size by at most one,
    // so the (unlikely) loop body runs at most once.
    static size_t next_index(size_t arg, size_t max_size)
    {
        size_t ret = arg + 1;
        while (BOOST_UNLIKELY(ret >= max_size))
            ret -= max_size;
        return ret;
    }

    // Number of elements available for popping, computed from a snapshot of
    // both indices (handles the wrapped case write_index < read_index).
    static size_t read_available(size_t write_index, size_t read_index, size_t max_size)
    {
        if (write_index >= read_index)
            return write_index - read_index;

        const size_t ret = write_index + max_size - read_index;
        return ret;
    }

    // Number of free slots for pushing; "- 1" accounts for the slot that is
    // always kept unused to distinguish a full buffer from an empty one.
    static size_t write_available(size_t write_index, size_t read_index, size_t max_size)
    {
        size_t ret = read_index - write_index - 1;
        if (write_index >= read_index)
            ret += max_size;
        return ret;
    }

    // Consumer-side query: acquire on write_index_ synchronizes with the
    // producer's release store, so the elements counted here are fully
    // constructed and visible.
    size_t read_available(size_t max_size) const
    {
        size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index = read_index_.load(memory_order_relaxed);
        return read_available(write_index, read_index, max_size);
    }

    // Producer-side query: acquire on read_index_ pairs with the consumer's
    // release store, so slots counted as free really have been vacated.
    size_t write_available(size_t max_size) const
    {
        size_t write_index = write_index_.load(memory_order_relaxed);
        const size_t read_index = read_index_.load(memory_order_acquire);
        return write_available(write_index, read_index, max_size);
    }

    // Push a single element.  Returns false if the ringbuffer is full.
    // Must only be called from the producer thread.
    bool push(T const & t, T * buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
        const size_t next = next_index(write_index, max_size);

        if (next == read_index_.load(memory_order_acquire))
            return false; /* ringbuffer is full */

        new (buffer + write_index) T(t); // copy-construct

        // release: publishes the constructed element before the new index
        // becomes visible to the consumer
        write_index_.store(next, memory_order_release);

        return true;
    }

    // Push up to input_count elements from an array; returns number pushed.
    size_t push(const T * input_buffer, size_t input_count, T * internal_buffer, size_t max_size)
    {
        return push(input_buffer, input_buffer + input_count, internal_buffer, max_size) - input_buffer;
    }

    // Push as many elements of [begin, end) as fit; returns an iterator one
    // past the last element actually pushed.
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end, T * internal_buffer, size_t max_size)
    {
        // FIXME: avoid std::distance

        const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
        const size_t read_index  = read_index_.load(memory_order_acquire);
        const size_t avail = write_available(write_index, read_index, max_size);

        if (avail == 0)
            return begin;

        size_t input_count = std::distance(begin, end);
        input_count = (std::min)(input_count, avail);

        size_t new_write_index = write_index + input_count;

        const ConstIterator last = boost::next(begin, input_count);

        if (write_index + input_count > max_size) {
            /* copy data in two sections: tail of the array, then the front */
            const size_t count0 = max_size - write_index;
            const ConstIterator midpoint = boost::next(begin, count0);

            std::uninitialized_copy(begin, midpoint, internal_buffer + write_index);
            std::uninitialized_copy(midpoint, last, internal_buffer);
            new_write_index -= max_size;
        } else {
            std::uninitialized_copy(begin, last, internal_buffer + write_index);

            if (new_write_index == max_size)
                new_write_index = 0;
        }

        // publish all copied elements at once
        write_index_.store(new_write_index, memory_order_release);
        return last;
    }

    // Pop one element via functor.  The element is destroyed before the
    // read index is published, so the slot is free once the producer sees it.
    // (Functor& / Functor const& overloads are duplicated for pre-C++11
    // compatibility with both mutable and const callables.)
    template <typename Functor>
    bool consume_one(Functor & functor, T * buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index  = read_index_.load(memory_order_relaxed); // only written from pop thread
        if ( empty(write_index, read_index) )
            return false;

        T & object_to_consume = buffer[read_index];
        functor( object_to_consume );
        object_to_consume.~T();

        size_t next = next_index(read_index, max_size);
        read_index_.store(next, memory_order_release);
        return true;
    }

    template <typename Functor>
    bool consume_one(Functor const & functor, T * buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index  = read_index_.load(memory_order_relaxed); // only written from pop thread
        if ( empty(write_index, read_index) )
            return false;

        T & object_to_consume = buffer[read_index];
        functor( object_to_consume );
        object_to_consume.~T();

        size_t next = next_index(read_index, max_size);
        read_index_.store(next, memory_order_release);
        return true;
    }

    // Pop every currently-available element via functor; returns the count.
    template <typename Functor>
    size_t consume_all (Functor const & functor, T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index  = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);

        if (avail == 0)
            return 0;

        const size_t output_count = avail;

        size_t new_read_index = read_index + output_count;

        if (read_index + output_count > max_size) {
            /* copy data in two sections */
            const size_t count0 = max_size - read_index;
            const size_t count1 = output_count - count0;

            run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor);
            run_functor_and_delete(internal_buffer, internal_buffer + count1, functor);

            new_read_index -= max_size;
        } else {
            run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor);

            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return output_count;
    }

    template <typename Functor>
    size_t consume_all (Functor & functor, T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index  = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);

        if (avail == 0)
            return 0;

        const size_t output_count = avail;

        size_t new_read_index = read_index + output_count;

        if (read_index + output_count > max_size) {
            /* copy data in two sections */
            const size_t count0 = max_size - read_index;
            const size_t count1 = output_count - count0;

            run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor);
            run_functor_and_delete(internal_buffer, internal_buffer + count1, functor);

            new_read_index -= max_size;
        } else {
            run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor);

            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return output_count;
    }

    // Pop up to output_count elements into output_buffer; returns the number
    // actually popped (0 if the ringbuffer was empty).
    size_t pop (T * output_buffer, size_t output_count, T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index  = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);

        if (avail == 0)
            return 0;

        output_count = (std::min)(output_count, avail);

        size_t new_read_index = read_index + output_count;

        if (read_index + output_count > max_size) {
            /* copy data in two sections */
            const size_t count0 = max_size - read_index;
            const size_t count1 = output_count - count0;

            copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, output_buffer);
            copy_and_delete(internal_buffer, internal_buffer + count1, output_buffer + count0);

            new_read_index -= max_size;
        } else {
            copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, output_buffer);
            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return output_count;
    }

    // Pop all available elements to an output iterator; returns the count.
    template <typename OutputIterator>
    size_t pop_to_output_iterator (OutputIterator it, T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index  = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);
        if (avail == 0)
            return 0;

        size_t new_read_index = read_index + avail;

        if (read_index + avail > max_size) {
            /* copy data in two sections */
            const size_t count0 = max_size - read_index;
            const size_t count1 = avail - count0;

            it = copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, it);
            copy_and_delete(internal_buffer, internal_buffer + count1, it);

            new_read_index -= max_size;
        } else {
            copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + avail, it);
            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return avail;
    }

    // Unsynchronized access to the element at the read index; only valid on
    // the consumer thread while the queue is non-empty.
    const T& front(const T * internal_buffer) const
    {
        const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
        return *(internal_buffer + read_index);
    }

    T& front(T * internal_buffer)
    {
        const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
        return *(internal_buffer + read_index);
    }
#endif

public:
    /** reset the ringbuffer
     *
     * \note Not thread-safe
     * */
    void reset(void)
    {
        if ( !boost::has_trivial_destructor<T>::value ) {
            // make sure to call all destructors!
            // NOTE(review): ringbuffer_base itself declares no pop(T&)
            // overload; this branch appears to rely on never being
            // instantiated here (spsc_queue shadows reset() with its own
            // implementation) — TODO confirm.
            T dummy_element;
            while (pop(dummy_element))
            {}
        } else {
            write_index_.store(0, memory_order_relaxed);
            read_index_.store(0, memory_order_release);
        }
    }

    /** Check if the ringbuffer is empty
     *
     * \return true, if the ringbuffer is empty, false otherwise
     * \note Due to the concurrent nature of the ringbuffer the result may be inaccurate.
     * */
    bool empty(void)
    {
        return empty(write_index_.load(memory_order_relaxed), read_index_.load(memory_order_relaxed));
    }

    /**
     * \return true, if implementation is lock-free.
     *
     * */
    bool is_lock_free(void) const
    {
        return write_index_.is_lock_free() && read_index_.is_lock_free();
    }

private:
    bool empty(size_t write_index, size_t read_index)
    {
        return write_index == read_index;
    }

    // Copy [first, last) to out and destroy the sources.  For trivially
    // destructible T this degenerates to std::copy (which may use memcpy).
    template< class OutputIterator >
    OutputIterator copy_and_delete( T * first, T * last, OutputIterator out )
    {
        if (boost::has_trivial_destructor<T>::value) {
            return std::copy(first, last, out); // will use memcpy if possible
        } else {
            for (; first != last; ++first, ++out) {
                *out = *first;
                first->~T();
            }
            return out;
        }
    }

    // Apply functor to every element of [first, last), destroying each one
    // immediately after it is consumed.
    template< class Functor >
    void run_functor_and_delete( T * first, T * last, Functor & functor )
    {
        for (; first != last; ++first) {
            functor(*first);
            first->~T();
        }
    }

    template< class Functor >
    void run_functor_and_delete( T * first, T * last, Functor const & functor )
    {
        for (; first != last; ++first) {
            functor(*first);
            first->~T();
        }
    }
};
  334. template <typename T, std::size_t MaxSize>
  335. class compile_time_sized_ringbuffer:
  336. public ringbuffer_base<T>
  337. {
  338. typedef std::size_t size_type;
  339. static const std::size_t max_size = MaxSize + 1;
  340. typedef typename boost::aligned_storage<max_size * sizeof(T),
  341. boost::alignment_of<T>::value
  342. >::type storage_type;
  343. storage_type storage_;
  344. T * data()
  345. {
  346. return static_cast<T*>(storage_.address());
  347. }
  348. const T * data() const
  349. {
  350. return static_cast<const T*>(storage_.address());
  351. }
  352. protected:
  353. size_type max_number_of_elements() const
  354. {
  355. return max_size;
  356. }
  357. public:
  358. bool push(T const & t)
  359. {
  360. return ringbuffer_base<T>::push(t, data(), max_size);
  361. }
  362. template <typename Functor>
  363. bool consume_one(Functor & f)
  364. {
  365. return ringbuffer_base<T>::consume_one(f, data(), max_size);
  366. }
  367. template <typename Functor>
  368. bool consume_one(Functor const & f)
  369. {
  370. return ringbuffer_base<T>::consume_one(f, data(), max_size);
  371. }
  372. template <typename Functor>
  373. size_type consume_all(Functor & f)
  374. {
  375. return ringbuffer_base<T>::consume_all(f, data(), max_size);
  376. }
  377. template <typename Functor>
  378. size_type consume_all(Functor const & f)
  379. {
  380. return ringbuffer_base<T>::consume_all(f, data(), max_size);
  381. }
  382. size_type push(T const * t, size_type size)
  383. {
  384. return ringbuffer_base<T>::push(t, size, data(), max_size);
  385. }
  386. template <size_type size>
  387. size_type push(T const (&t)[size])
  388. {
  389. return push(t, size);
  390. }
  391. template <typename ConstIterator>
  392. ConstIterator push(ConstIterator begin, ConstIterator end)
  393. {
  394. return ringbuffer_base<T>::push(begin, end, data(), max_size);
  395. }
  396. size_type pop(T * ret, size_type size)
  397. {
  398. return ringbuffer_base<T>::pop(ret, size, data(), max_size);
  399. }
  400. template <typename OutputIterator>
  401. size_type pop_to_output_iterator(OutputIterator it)
  402. {
  403. return ringbuffer_base<T>::pop_to_output_iterator(it, data(), max_size);
  404. }
  405. const T& front(void) const
  406. {
  407. return ringbuffer_base<T>::front(data());
  408. }
  409. T& front(void)
  410. {
  411. return ringbuffer_base<T>::front(data());
  412. }
  413. };
// Ringbuffer whose capacity is chosen at construction time.  The storage is
// obtained from Alloc; private inheritance lets an empty allocator occupy no
// space (empty-base optimization).
template <typename T, typename Alloc>
class runtime_sized_ringbuffer:
    public ringbuffer_base<T>,
    private Alloc
{
    typedef std::size_t size_type;
    size_type max_elements_;
#ifdef BOOST_NO_CXX11_ALLOCATOR
    typedef typename Alloc::pointer pointer;
#else
    typedef std::allocator_traits<Alloc> allocator_traits;
    typedef typename allocator_traits::pointer pointer;
#endif
    pointer array_;

protected:
    // size of the allocated array, including the one reserved slot
    size_type max_number_of_elements() const
    {
        return max_elements_;
    }

public:
    // max_elements + 1: one slot is always kept unused to distinguish a full
    // buffer from an empty one.
    explicit runtime_sized_ringbuffer(size_type max_elements):
        max_elements_(max_elements + 1)
    {
#ifdef BOOST_NO_CXX11_ALLOCATOR
        array_ = Alloc::allocate(max_elements_);
#else
        Alloc& alloc = *this;
        array_ = allocator_traits::allocate(alloc, max_elements_);
#endif
    }

    template <typename U>
    runtime_sized_ringbuffer(typename detail::allocator_rebind_helper<Alloc, U>::type const & alloc, size_type max_elements):
        Alloc(alloc), max_elements_(max_elements + 1)
    {
#ifdef BOOST_NO_CXX11_ALLOCATOR
        array_ = Alloc::allocate(max_elements_);
#else
        Alloc& allocator = *this;
        array_ = allocator_traits::allocate(allocator, max_elements_);
#endif
    }

    runtime_sized_ringbuffer(Alloc const & alloc, size_type max_elements):
        Alloc(alloc), max_elements_(max_elements + 1)
    {
#ifdef BOOST_NO_CXX11_ALLOCATOR
        array_ = Alloc::allocate(max_elements_);
#else
        Alloc& allocator = *this;
        array_ = allocator_traits::allocate(allocator, max_elements_);
#endif
    }

    ~runtime_sized_ringbuffer(void)
    {
        // destroy all remaining items
        // NOTE: draining via pop requires T to be default-constructible and
        // copy-assignable (the dummy element below).
        T out;
        while (pop(&out, 1)) {}

#ifdef BOOST_NO_CXX11_ALLOCATOR
        Alloc::deallocate(array_, max_elements_);
#else
        Alloc& allocator = *this;
        allocator_traits::deallocate(allocator, array_, max_elements_);
#endif
    }

    // The &*array_ below converts a (possibly fancy) allocator pointer to a
    // raw T* for the base-class operations.
    bool push(T const & t)
    {
        return ringbuffer_base<T>::push(t, &*array_, max_elements_);
    }

    template <typename Functor>
    bool consume_one(Functor & f)
    {
        return ringbuffer_base<T>::consume_one(f, &*array_, max_elements_);
    }

    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        return ringbuffer_base<T>::consume_one(f, &*array_, max_elements_);
    }

    template <typename Functor>
    size_type consume_all(Functor & f)
    {
        return ringbuffer_base<T>::consume_all(f, &*array_, max_elements_);
    }

    template <typename Functor>
    size_type consume_all(Functor const & f)
    {
        return ringbuffer_base<T>::consume_all(f, &*array_, max_elements_);
    }

    size_type push(T const * t, size_type size)
    {
        return ringbuffer_base<T>::push(t, size, &*array_, max_elements_);
    }

    template <size_type size>
    size_type push(T const (&t)[size])
    {
        return push(t, size);
    }

    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return ringbuffer_base<T>::push(begin, end, &*array_, max_elements_);
    }

    size_type pop(T * ret, size_type size)
    {
        return ringbuffer_base<T>::pop(ret, size, &*array_, max_elements_);
    }

    template <typename OutputIterator>
    size_type pop_to_output_iterator(OutputIterator it)
    {
        return ringbuffer_base<T>::pop_to_output_iterator(it, &*array_, max_elements_);
    }

    const T& front(void) const
    {
        return ringbuffer_base<T>::front(&*array_);
    }

    T& front(void)
    {
        return ringbuffer_base<T>::front(&*array_);
    }
};
// Metafunction: maps the user-supplied option pack (capacity<>, allocator<>)
// onto the concrete ringbuffer implementation to inherit from.
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, typename A0, typename A1>
#else
template <typename T, typename ...Options>
#endif
struct make_ringbuffer
{
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename ringbuffer_signature::bind<A0, A1>::type bound_args;
#else
    typedef typename ringbuffer_signature::bind<Options...>::type bound_args;
#endif

    // capacity<> present -> compile-time sized; absent -> run-time sized
    typedef extract_capacity<bound_args> extract_capacity_t;

    static const bool runtime_sized = !extract_capacity_t::has_capacity;
    static const size_t capacity    =  extract_capacity_t::capacity;

    typedef extract_allocator<bound_args, T> extract_allocator_t;
    typedef typename extract_allocator_t::type allocator;

    // allocator argument is only sane, for run-time sized ringbuffers
    // (a compile-time sized ringbuffer embeds its storage and never
    // allocates, so combining capacity<> with allocator<> is rejected here)
    BOOST_STATIC_ASSERT((mpl::if_<mpl::bool_<!runtime_sized>,
                                  mpl::bool_<!extract_allocator_t::has_allocator>,
                                  mpl::true_
                                 >::type::value));

    typedef typename mpl::if_c<runtime_sized,
                               runtime_sized_ringbuffer<T, allocator>,
                               compile_time_sized_ringbuffer<T, capacity>
                              >::type ringbuffer_type;
};
  560. } /* namespace detail */
  561. /** The spsc_queue class provides a single-writer/single-reader fifo queue, pushing and popping is wait-free.
  562. *
  563. * \b Policies:
  564. * - \c boost::lockfree::capacity<>, optional <br>
  565. * If this template argument is passed to the options, the size of the ringbuffer is set at compile-time.
  566. *
  567. * - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<T>> <br>
  568. * Specifies the allocator that is used to allocate the ringbuffer. This option is only valid, if the ringbuffer is configured
  569. * to be sized at run-time
  570. *
  571. * \b Requirements:
  572. * - T must have a default constructor
  573. * - T must be copyable
  574. * */
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
template <typename T, class A0, class A1>
#else
template <typename T, typename ...Options>
#endif
class spsc_queue:
#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    public detail::make_ringbuffer<T, A0, A1>::ringbuffer_type
#else
    public detail::make_ringbuffer<T, Options...>::ringbuffer_type
#endif
{
private:

#ifndef BOOST_DOXYGEN_INVOKED

#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES
    typedef typename detail::make_ringbuffer<T, A0, A1>::ringbuffer_type base_type;
    static const bool runtime_sized = detail::make_ringbuffer<T, A0, A1>::runtime_sized;
    typedef typename detail::make_ringbuffer<T, A0, A1>::allocator allocator_arg;
#else
    typedef typename detail::make_ringbuffer<T, Options...>::ringbuffer_type base_type;
    static const bool runtime_sized = detail::make_ringbuffer<T, Options...>::runtime_sized;
    typedef typename detail::make_ringbuffer<T, Options...>::allocator allocator_arg;
#endif

    struct implementation_defined
    {
        typedef allocator_arg allocator;
        typedef std::size_t size_type;
    };
#endif

public:
    typedef T value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;

    /** Constructs a spsc_queue
     *
     * \pre spsc_queue must be configured to be sized at compile-time
     */
    // @{
    spsc_queue(void)
    {
        // run-time check only: cannot be a static assert, since this ctor is
        // compiled for run-time sized queues as well
        BOOST_ASSERT(!runtime_sized);
    }

    template <typename U>
    explicit spsc_queue(typename detail::allocator_rebind_helper<allocator, U>::type const &)
    {
        // just for API compatibility: we don't actually need an allocator
        BOOST_STATIC_ASSERT(!runtime_sized);
    }

    explicit spsc_queue(allocator const &)
    {
        // just for API compatibility: we don't actually need an allocator
        BOOST_ASSERT(!runtime_sized);
    }
    // @}

    /** Constructs a spsc_queue for element_count elements
     *
     * \pre spsc_queue must be configured to be sized at run-time
     */
    // @{
    explicit spsc_queue(size_type element_count):
        base_type(element_count)
    {
        BOOST_ASSERT(runtime_sized);
    }

    template <typename U>
    spsc_queue(size_type element_count, typename detail::allocator_rebind_helper<allocator, U>::type const & alloc):
        base_type(alloc, element_count)
    {
        BOOST_STATIC_ASSERT(runtime_sized);
    }

    spsc_queue(size_type element_count, allocator_arg const & alloc):
        base_type(alloc, element_count)
    {
        BOOST_ASSERT(runtime_sized);
    }
    // @}

    /** Pushes object t to the ringbuffer.
     *
     * \pre only one thread is allowed to push data to the spsc_queue
     * \post object will be pushed to the spsc_queue, unless it is full.
     * \return true, if the push operation is successful.
     *
     * \note Thread-safe and wait-free
     * */
    bool push(T const & t)
    {
        return base_type::push(t);
    }

    /** Pops one object from ringbuffer.
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \post if ringbuffer is not empty, object will be discarded.
     * \return true, if the pop operation is successful, false if ringbuffer was empty.
     *
     * \note Thread-safe and wait-free
     */
    bool pop ()
    {
        detail::consume_noop consume_functor;
        return consume_one( consume_functor );
    }

    /** Pops one object from ringbuffer.
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \post if ringbuffer is not empty, object will be copied to ret.
     * \return true, if the pop operation is successful, false if ringbuffer was empty.
     *
     * \note Thread-safe and wait-free
     */
    template <typename U>
    typename boost::enable_if<typename is_convertible<T, U>::type, bool>::type
    pop (U & ret)
    {
        detail::consume_via_copy<U> consume_functor(ret);
        return consume_one( consume_functor );
    }

    /** Pushes as many objects from the array t as there is space.
     *
     * \pre only one thread is allowed to push data to the spsc_queue
     * \return number of pushed items
     *
     * \note Thread-safe and wait-free
     */
    size_type push(T const * t, size_type size)
    {
        return base_type::push(t, size);
    }

    /** Pushes as many objects from the array t as there is space available.
     *
     * \pre only one thread is allowed to push data to the spsc_queue
     * \return number of pushed items
     *
     * \note Thread-safe and wait-free
     */
    template <size_type size>
    size_type push(T const (&t)[size])
    {
        return push(t, size);
    }

    /** Pushes as many objects from the range [begin, end) as there is space.
     *
     * \pre only one thread is allowed to push data to the spsc_queue
     * \return iterator to the first element, which has not been pushed
     *
     * \note Thread-safe and wait-free
     */
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        return base_type::push(begin, end);
    }

    /** Pops a maximum of size objects from ringbuffer.
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \return number of popped items
     *
     * \note Thread-safe and wait-free
     * */
    size_type pop(T * ret, size_type size)
    {
        return base_type::pop(ret, size);
    }

    /** Pops a maximum of size objects from spsc_queue.
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \return number of popped items
     *
     * \note Thread-safe and wait-free
     * */
    template <size_type size>
    size_type pop(T (&ret)[size])
    {
        return pop(ret, size);
    }

    /** Pops objects to the output iterator it
     *
     * \pre only one thread is allowed to pop data from the spsc_queue
     * \return number of popped items
     *
     * \note Thread-safe and wait-free
     * */
    template <typename OutputIterator>
    typename boost::disable_if<typename is_convertible<T, OutputIterator>::type, size_type>::type
    pop(OutputIterator it)
    {
        return base_type::pop_to_output_iterator(it);
    }

    /** consumes one element via a functor
     *
     * pops one element from the queue and applies the functor on this object
     *
     * \returns true, if one element was consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    bool consume_one(Functor & f)
    {
        return base_type::consume_one(f);
    }

    /// \copydoc boost::lockfree::spsc_queue::consume_one(Functor & rhs)
    template <typename Functor>
    bool consume_one(Functor const & f)
    {
        return base_type::consume_one(f);
    }

    /** consumes all elements via a functor
     *
     * sequentially pops all elements from the queue and applies the functor on each object
     *
     * \returns number of elements that are consumed
     *
     * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
     * */
    template <typename Functor>
    size_type consume_all(Functor & f)
    {
        return base_type::consume_all(f);
    }

    /// \copydoc boost::lockfree::spsc_queue::consume_all(Functor & rhs)
    template <typename Functor>
    size_type consume_all(Functor const & f)
    {
        return base_type::consume_all(f);
    }

    /** get number of elements that are available for read
     *
     * \return number of available elements that can be popped from the spsc_queue
     *
     * \note Thread-safe and wait-free, should only be called from the consumer thread
     * */
    size_type read_available() const
    {
        return base_type::read_available(base_type::max_number_of_elements());
    }

    /** get write space to write elements
     *
     * \return number of elements that can be pushed to the spsc_queue
     *
     * \note Thread-safe and wait-free, should only be called from the producer thread
     * */
    size_type write_available() const
    {
        return base_type::write_available(base_type::max_number_of_elements());
    }

    /** get reference to element in the front of the queue
     *
     * Availability of front element can be checked using read_available().
     *
     * \pre only a consuming thread is allowed to check front element
     * \pre read_available() > 0. If ringbuffer is empty, it's undefined behaviour to invoke this method.
     * \return reference to the first element in the queue
     *
     * \note Thread-safe and wait-free
     */
    const T& front() const
    {
        BOOST_ASSERT(read_available() > 0);
        return base_type::front();
    }

    /// \copydoc boost::lockfree::spsc_queue::front() const
    T& front()
    {
        BOOST_ASSERT(read_available() > 0);
        return base_type::front();
    }

    /** reset the ringbuffer
     *
     * \note Not thread-safe
     * */
    void reset(void)
    {
        if ( !boost::has_trivial_destructor<T>::value ) {
            // make sure to call all destructors!
            // drain element-by-element so each remaining T is destroyed
            T dummy_element;
            while (pop(dummy_element))
            {}
        } else {
            // trivially destructible: simply rewind both indices
            base_type::write_index_.store(0, memory_order_relaxed);
            base_type::read_index_.store(0, memory_order_release);
        }
    }
};
  858. } /* namespace lockfree */
  859. } /* namespace boost */
  860. #endif /* BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED */