// ops_gcc_x86.hpp
/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2009 Helge Bahmann
 * Copyright (c) 2012 Tim Blechmann
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file   atomic/detail/ops_gcc_x86.hpp
 *
 * This header contains implementation of the \c operations template.
 */
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_

#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {
  33. struct gcc_x86_operations_base
  34. {
  35. static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
  36. static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
  37. static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
  38. {
  39. if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
  40. __asm__ __volatile__ ("" ::: "memory");
  41. }
  42. static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
  43. {
  44. if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
  45. __asm__ __volatile__ ("" ::: "memory");
  46. }
  47. };
  48. template< std::size_t Size, bool Signed, typename Derived >
  49. struct gcc_x86_operations :
  50. public gcc_x86_operations_base
  51. {
  52. typedef typename make_storage_type< Size >::type storage_type;
  53. static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
  54. {
  55. if (order != memory_order_seq_cst)
  56. {
  57. fence_before(order);
  58. storage = v;
  59. fence_after(order);
  60. }
  61. else
  62. {
  63. Derived::exchange(storage, v, order);
  64. }
  65. }
  66. static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
  67. {
  68. storage_type v = storage;
  69. fence_after(order);
  70. return v;
  71. }
  72. static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
  73. {
  74. return Derived::fetch_add(storage, -v, order);
  75. }
  76. static BOOST_FORCEINLINE bool compare_exchange_weak(
  77. storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
  78. {
  79. return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
  80. }
  81. static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
  82. {
  83. return !!Derived::exchange(storage, (storage_type)1, order);
  84. }
  85. static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
  86. {
  87. store(storage, (storage_type)0, order);
  88. }
  89. };
  90. template< bool Signed >
  91. struct operations< 1u, Signed > :
  92. public gcc_x86_operations< 1u, Signed, operations< 1u, Signed > >
  93. {
  94. typedef gcc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
  95. typedef typename base_type::storage_type storage_type;
  96. typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
  97. typedef typename make_storage_type< 4u >::type temp_storage_type;
  98. static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
  99. static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
  100. static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  101. {
  102. __asm__ __volatile__
  103. (
  104. "lock; xaddb %0, %1"
  105. : "+q" (v), "+m" (storage)
  106. :
  107. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  108. );
  109. return v;
  110. }
  111. static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  112. {
  113. __asm__ __volatile__
  114. (
  115. "xchgb %0, %1"
  116. : "+q" (v), "+m" (storage)
  117. :
  118. : "memory"
  119. );
  120. return v;
  121. }
  122. static BOOST_FORCEINLINE bool compare_exchange_strong(
  123. storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
  124. {
  125. storage_type previous = expected;
  126. bool success;
  127. #if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  128. __asm__ __volatile__
  129. (
  130. "lock; cmpxchgb %3, %1"
  131. : "+a" (previous), "+m" (storage), "=@ccz" (success)
  132. : "q" (desired)
  133. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  134. );
  135. #else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  136. __asm__ __volatile__
  137. (
  138. "lock; cmpxchgb %3, %1\n\t"
  139. "sete %2"
  140. : "+a" (previous), "+m" (storage), "=q" (success)
  141. : "q" (desired)
  142. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  143. );
  144. #endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  145. expected = previous;
  146. return success;
  147. }
  148. #define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
  149. temp_storage_type new_val;\
  150. __asm__ __volatile__\
  151. (\
  152. ".align 16\n\t"\
  153. "1: mov %[arg], %2\n\t"\
  154. op " %%al, %b2\n\t"\
  155. "lock; cmpxchgb %b2, %[storage]\n\t"\
  156. "jne 1b"\
  157. : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
  158. : [arg] "ir" ((temp_storage_type)argument)\
  159. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
  160. )
  161. static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  162. {
  163. storage_type res = storage;
  164. BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, res);
  165. return res;
  166. }
  167. static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  168. {
  169. storage_type res = storage;
  170. BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, res);
  171. return res;
  172. }
  173. static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  174. {
  175. storage_type res = storage;
  176. BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, res);
  177. return res;
  178. }
  179. #undef BOOST_ATOMIC_DETAIL_CAS_LOOP
  180. };
  181. template< bool Signed >
  182. struct operations< 2u, Signed > :
  183. public gcc_x86_operations< 2u, Signed, operations< 2u, Signed > >
  184. {
  185. typedef gcc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
  186. typedef typename base_type::storage_type storage_type;
  187. typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
  188. typedef typename make_storage_type< 4u >::type temp_storage_type;
  189. static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
  190. static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
  191. static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  192. {
  193. __asm__ __volatile__
  194. (
  195. "lock; xaddw %0, %1"
  196. : "+q" (v), "+m" (storage)
  197. :
  198. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  199. );
  200. return v;
  201. }
  202. static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  203. {
  204. __asm__ __volatile__
  205. (
  206. "xchgw %0, %1"
  207. : "+q" (v), "+m" (storage)
  208. :
  209. : "memory"
  210. );
  211. return v;
  212. }
  213. static BOOST_FORCEINLINE bool compare_exchange_strong(
  214. storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
  215. {
  216. storage_type previous = expected;
  217. bool success;
  218. #if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  219. __asm__ __volatile__
  220. (
  221. "lock; cmpxchgw %3, %1"
  222. : "+a" (previous), "+m" (storage), "=@ccz" (success)
  223. : "q" (desired)
  224. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  225. );
  226. #else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  227. __asm__ __volatile__
  228. (
  229. "lock; cmpxchgw %3, %1\n\t"
  230. "sete %2"
  231. : "+a" (previous), "+m" (storage), "=q" (success)
  232. : "q" (desired)
  233. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  234. );
  235. #endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  236. expected = previous;
  237. return success;
  238. }
  239. #define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
  240. temp_storage_type new_val;\
  241. __asm__ __volatile__\
  242. (\
  243. ".align 16\n\t"\
  244. "1: mov %[arg], %2\n\t"\
  245. op " %%ax, %w2\n\t"\
  246. "lock; cmpxchgw %w2, %[storage]\n\t"\
  247. "jne 1b"\
  248. : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
  249. : [arg] "ir" ((temp_storage_type)argument)\
  250. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
  251. )
  252. static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  253. {
  254. storage_type res = storage;
  255. BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, res);
  256. return res;
  257. }
  258. static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  259. {
  260. storage_type res = storage;
  261. BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, res);
  262. return res;
  263. }
  264. static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  265. {
  266. storage_type res = storage;
  267. BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, res);
  268. return res;
  269. }
  270. #undef BOOST_ATOMIC_DETAIL_CAS_LOOP
  271. };
  272. template< bool Signed >
  273. struct operations< 4u, Signed > :
  274. public gcc_x86_operations< 4u, Signed, operations< 4u, Signed > >
  275. {
  276. typedef gcc_x86_operations< 4u, Signed, operations< 4u, Signed > > base_type;
  277. typedef typename base_type::storage_type storage_type;
  278. typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
  279. static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
  280. static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
  281. static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  282. {
  283. __asm__ __volatile__
  284. (
  285. "lock; xaddl %0, %1"
  286. : "+r" (v), "+m" (storage)
  287. :
  288. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  289. );
  290. return v;
  291. }
  292. static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  293. {
  294. __asm__ __volatile__
  295. (
  296. "xchgl %0, %1"
  297. : "+r" (v), "+m" (storage)
  298. :
  299. : "memory"
  300. );
  301. return v;
  302. }
  303. static BOOST_FORCEINLINE bool compare_exchange_strong(
  304. storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
  305. {
  306. storage_type previous = expected;
  307. bool success;
  308. #if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  309. __asm__ __volatile__
  310. (
  311. "lock; cmpxchgl %3, %1"
  312. : "+a" (previous), "+m" (storage), "=@ccz" (success)
  313. : "r" (desired)
  314. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  315. );
  316. #else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  317. __asm__ __volatile__
  318. (
  319. "lock; cmpxchgl %3, %1\n\t"
  320. "sete %2"
  321. : "+a" (previous), "+m" (storage), "=q" (success)
  322. : "r" (desired)
  323. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  324. );
  325. #endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  326. expected = previous;
  327. return success;
  328. }
  329. #define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
  330. storage_type new_val;\
  331. __asm__ __volatile__\
  332. (\
  333. ".align 16\n\t"\
  334. "1: mov %[arg], %[new_val]\n\t"\
  335. op " %%eax, %[new_val]\n\t"\
  336. "lock; cmpxchgl %[new_val], %[storage]\n\t"\
  337. "jne 1b"\
  338. : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
  339. : [arg] "ir" (argument)\
  340. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
  341. )
  342. static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  343. {
  344. storage_type res = storage;
  345. BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, res);
  346. return res;
  347. }
  348. static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  349. {
  350. storage_type res = storage;
  351. BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, res);
  352. return res;
  353. }
  354. static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  355. {
  356. storage_type res = storage;
  357. BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, res);
  358. return res;
  359. }
  360. #undef BOOST_ATOMIC_DETAIL_CAS_LOOP
  361. };
  362. #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
  363. template< bool Signed >
  364. struct operations< 8u, Signed > :
  365. public cas_based_operations< gcc_dcas_x86< Signed > >
  366. {
  367. static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
  368. static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
  369. };
  370. #elif defined(__x86_64__)
  371. template< bool Signed >
  372. struct operations< 8u, Signed > :
  373. public gcc_x86_operations< 8u, Signed, operations< 8u, Signed > >
  374. {
  375. typedef gcc_x86_operations< 8u, Signed, operations< 8u, Signed > > base_type;
  376. typedef typename base_type::storage_type storage_type;
  377. typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
  378. static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
  379. static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
  380. static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  381. {
  382. __asm__ __volatile__
  383. (
  384. "lock; xaddq %0, %1"
  385. : "+r" (v), "+m" (storage)
  386. :
  387. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  388. );
  389. return v;
  390. }
  391. static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  392. {
  393. __asm__ __volatile__
  394. (
  395. "xchgq %0, %1"
  396. : "+r" (v), "+m" (storage)
  397. :
  398. : "memory"
  399. );
  400. return v;
  401. }
  402. static BOOST_FORCEINLINE bool compare_exchange_strong(
  403. storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
  404. {
  405. storage_type previous = expected;
  406. bool success;
  407. #if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  408. __asm__ __volatile__
  409. (
  410. "lock; cmpxchgq %3, %1"
  411. : "+a" (previous), "+m" (storage), "=@ccz" (success)
  412. : "r" (desired)
  413. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  414. );
  415. #else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  416. __asm__ __volatile__
  417. (
  418. "lock; cmpxchgq %3, %1\n\t"
  419. "sete %2"
  420. : "+a" (previous), "+m" (storage), "=q" (success)
  421. : "r" (desired)
  422. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
  423. );
  424. #endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
  425. expected = previous;
  426. return success;
  427. }
  428. #define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
  429. storage_type new_val;\
  430. __asm__ __volatile__\
  431. (\
  432. ".align 16\n\t"\
  433. "1: movq %[arg], %[new_val]\n\t"\
  434. op " %%rax, %[new_val]\n\t"\
  435. "lock; cmpxchgq %[new_val], %[storage]\n\t"\
  436. "jne 1b"\
  437. : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
  438. : [arg] "r" (argument)\
  439. : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
  440. )
  441. static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  442. {
  443. storage_type res = storage;
  444. BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, res);
  445. return res;
  446. }
  447. static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  448. {
  449. storage_type res = storage;
  450. BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, res);
  451. return res;
  452. }
  453. static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
  454. {
  455. storage_type res = storage;
  456. BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, res);
  457. return res;
  458. }
  459. #undef BOOST_ATOMIC_DETAIL_CAS_LOOP
  460. };
  461. #endif
  462. #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
  463. template< bool Signed >
  464. struct operations< 16u, Signed > :
  465. public cas_based_operations< gcc_dcas_x86_64< Signed > >
  466. {
  467. static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
  468. static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
  469. };
  470. #endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
  471. BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
  472. {
  473. if (order == memory_order_seq_cst)
  474. {
  475. __asm__ __volatile__
  476. (
  477. #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
  478. "mfence\n"
  479. #else
  480. "lock; addl $0, (%%esp)\n"
  481. #endif
  482. ::: "memory"
  483. );
  484. }
  485. else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_release))) != 0u)
  486. {
  487. __asm__ __volatile__ ("" ::: "memory");
  488. }
  489. }
  490. BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
  491. {
  492. if (order != memory_order_relaxed)
  493. __asm__ __volatile__ ("" ::: "memory");
  494. }
} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_