ops_gcc_atomic.hpp

/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2014 Andrey Semashev
 */

/*!
 * \file   atomic/detail/ops_gcc_atomic.hpp
 *
 * This header contains implementation of the \c operations template.
 */
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_

#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>

#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif

#if __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE ||\
    __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE || __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE ||\
    __GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE || __GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE ||\
    __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE
// There are platforms where we need to use larger storage types
#include <boost/atomic/detail/int_sizes.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#endif
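
// Illustrative example (hypothetical platform, not taken from this header): if
// __GCC_ATOMIC_SHORT_LOCK_FREE reports a weaker guarantee than BOOST_ATOMIC_SHORT_LOCK_FREE
// while the 4-byte __atomic intrinsics are fully lock-free, the 2-byte operations defined
// below fall back to extending_cas_based_operations, which roughly keeps the value in a wider
// storage unit and performs the updates with a wider compare-and-swap loop.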

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

#if defined(__INTEL_COMPILER)
// This is used to suppress warning #32013 described below for Intel Compiler.
// In debug builds the compiler does not inline any functions, so basically
// every atomic function call results in this warning. I don't know any other
// way to selectively disable just this one warning.
#pragma system_header
#endif

namespace boost {
namespace atomics {
namespace detail {

/*!
 * The function converts \c boost::memory_order values to the compiler-specific constants.
 *
 * NOTE: The intention is that the function is optimized away by the compiler, and the
 * compiler-specific constants are passed to the intrinsics. Unfortunately, constexpr doesn't
 * work in this case because the standard atomics interface requires memory ordering
 * constants to be passed as function arguments, at which point they stop being constexpr.
 * However, it is crucial that the compiler sees constants and not runtime values,
 * because otherwise it just ignores the ordering value and always uses seq_cst.
 * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
 * gcc 4.8.2. Intel Compiler issues a warning in this case:
 *
 * warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
 *
 * while gcc acts silently.
 *
 * To mitigate the problem ALL functions, including the atomic<> members, must be
 * declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
 * all functions are called with constant orderings and call the intrinsics properly.
 *
 * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
 * propagate constants even when functions are marked with BOOST_FORCEINLINE. In this case
 * all atomic operations will be executed with seq_cst semantics.
 */
BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
{
    return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
        (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE :
        (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
}
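
// Illustrative example (not part of the implementation): after inlining, a call such as
//
//   __atomic_load_n(&storage, convert_memory_order_to_gcc(memory_order_acquire))
//
// is expected to collapse to __atomic_load_n(&storage, __ATOMIC_ACQUIRE), because the ternary
// chain above folds to a constant once the ordering argument is itself a constant.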

template< std::size_t Size, bool Signed >
struct gcc_atomic_operations
{
    typedef typename make_storage_type< Size >::type storage_type;
    typedef typename make_storage_type< Size >::aligned aligned_storage_type;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;

    // Note: In the current implementation, gcc_atomic_operations is only used when the __atomic
    // intrinsics for the particular storage size are always lock-free (i.e. the corresponding
    // LOCK_FREE macro is 2). Therefore it is safe to always set is_always_lock_free to true here.
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, false,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, true,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
    }
};
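
// Usage sketch (illustrative only, not part of this header): the higher-level atomic<> layers
// drive these static operations on raw storage roughly as follows:
//
//   typedef gcc_atomic_operations< 4u, true > ops;
//   ops::storage_type storage = 0;
//   ops::store(storage, 1, memory_order_release);
//   ops::storage_type prev = ops::fetch_add(storage, 5, memory_order_acq_rel);
//   ops::storage_type expected = prev + 5;
//   bool swapped = ops::compare_exchange_strong(storage, expected, 0,
//       memory_order_acq_rel, memory_order_relaxed);
//
// On failure, compare_exchange_strong updates 'expected' with the currently stored value,
// matching the semantics of __atomic_compare_exchange_n.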

#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)

// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149
// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16.
// A similar problem exists with gcc 7 as well, as it requires linking with libatomic to use 16-byte intrinsics:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
template< bool Signed >
struct operations< 16u, Signed > :
    public cas_based_operations< gcc_dcas_x86_64< Signed > >
{
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};

#else

template< bool Signed >
struct operations< 16u, Signed > :
    public gcc_atomic_operations< 16u, Signed >
{
};

#endif
#endif
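
// Note (illustrative, based on the companion ops_cas_based.hpp / ops_gcc_x86_dcas.hpp headers):
// in the cas_based_operations path above, the read-modify-write operations (exchange, fetch_add,
// fetch_and, etc.) are synthesized as compare-exchange loops on top of the CMPXCHG16B-based
// compare_exchange provided by gcc_dcas_x86_64, rather than mapping to dedicated __atomic intrinsics.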

#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)

// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355
template< bool Signed >
struct operations< 8u, Signed > :
    public cas_based_operations< gcc_dcas_x86< Signed > >
{
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};

#elif (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT64_EXTENDED

template< bool Signed >
struct operations< 8u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 8u, Signed >
{
};

#else

template< bool Signed >
struct operations< 8u, Signed > :
    public gcc_atomic_operations< 8u, Signed >
{
};

#endif
#endif

#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT32_EXTENDED

#if !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 4u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 4u, Signed >
{
};

#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 4u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 4u, Signed >
{
};

#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

#else

template< bool Signed >
struct operations< 4u, Signed > :
    public gcc_atomic_operations< 4u, Signed >
{
};

#endif
#endif

#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT16_EXTENDED

#if !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 2u, Signed >
{
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 2u, Signed >
{
};

#else

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 2u, Signed >
{
};

#endif

#else

template< bool Signed >
struct operations< 2u, Signed > :
    public gcc_atomic_operations< 2u, Signed >
{
};

#endif
#endif

#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE) ||\
    (__GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE) ||\
    (__GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE)

#if !defined(BOOST_ATOMIC_DETAIL_INT16_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 2u, Signed >, 1u, Signed >
{
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 1u, Signed >
{
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 1u, Signed >
{
};

#else

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 1u, Signed >
{
};

#endif

#else

template< bool Signed >
struct operations< 1u, Signed > :
    public gcc_atomic_operations< 1u, Signed >
{
};

#endif
#endif

#undef BOOST_ATOMIC_DETAIL_INT16_EXTENDED
#undef BOOST_ATOMIC_DETAIL_INT32_EXTENDED
#undef BOOST_ATOMIC_DETAIL_INT64_EXTENDED

BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
}

BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
    __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
}
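
// For example (illustrative): thread_fence(memory_order_seq_cst) is expected to compile to
// __atomic_thread_fence(__ATOMIC_SEQ_CST) and emit a full hardware fence where the target
// requires one, while signal_fence with the same ordering only constrains compiler reordering.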

} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_