/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2009 Helge Bahmann
 * Copyright (c) 2012 Tim Blechmann
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file   atomic/detail/ops_msvc_x86.hpp
 *
 * This header contains implementation of the \c operations template.
 */
#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_

#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/make_signed.hpp>
#include <boost/atomic/capabilities.hpp>
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif
#include <boost/atomic/detail/ops_msvc_common.hpp>
#if !defined(_M_IX86) && !(defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8) && defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16))
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#endif

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(BOOST_MSVC)
#pragma warning(push)
// frame pointer register 'ebx' modified by inline assembly code. See the note below.
#pragma warning(disable: 4731)
#endif

#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
extern "C" void _mm_mfence(void);
#if defined(BOOST_MSVC)
#pragma intrinsic(_mm_mfence)
#endif
#endif
namespace boost {
namespace atomics {
namespace detail {

/*
 * Implementation note for asm blocks.
 *
 * http://msdn.microsoft.com/en-us/data/k1a8ss06%28v=vs.105%29
 *
 * Some SSE types require eight-byte stack alignment, forcing the compiler to emit dynamic stack-alignment code.
 * To be able to access both the local variables and the function parameters after the alignment, the compiler
 * maintains two frame pointers. If the compiler performs frame pointer omission (FPO), it will use EBP and ESP.
 * If the compiler does not perform FPO, it will use EBX and EBP. To ensure code runs correctly, do not modify EBX
 * in asm code if the function requires dynamic stack alignment, as it could modify the frame pointer.
 * Either move the eight-byte aligned types out of the function, or avoid using EBX.
 *
 * Since we have no way of knowing whether the compiler uses FPO, we always have to save and restore ebx
 * whenever we clobber it. Additionally, we disable warning C4731 above so that the compiler
 * doesn't complain about ebx use.
 */
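/*
 * The resulting pattern, used by every asm block below that clobbers ebx
 * (see e.g. msvc_dcas_x86::exchange), looks like this:
 *
 *   uint32_t backup;
 *   __asm
 *   {
 *       mov backup, ebx   // save ebx; it may be serving as a frame pointer
 *       ...               // clobber ebx (e.g. as the low dword input of cmpxchg8b)
 *       mov ebx, backup   // restore ebx before leaving the asm block
 *   };
 */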
struct msvc_x86_operations_base
{
    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
    {
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
        _mm_mfence();
#else
        long tmp;
        BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
#endif
    }
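
    // Note for the fallback branch above: on x86 any lock-prefixed read-modify-write
    // instruction acts as a full memory barrier, so atomically exchanging a dummy
    // stack variable is a cheap substitute when the mfence instruction is unavailable
    // (i.e. on pre-SSE2 targets).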
    static BOOST_FORCEINLINE void fence_before(memory_order) BOOST_NOEXCEPT
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }

    static BOOST_FORCEINLINE void fence_after(memory_order) BOOST_NOEXCEPT
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }

    static BOOST_FORCEINLINE void fence_after_load(memory_order) BOOST_NOEXCEPT
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        // On x86 and x86_64 there is no need for a hardware barrier,
        // even if seq_cst memory order is requested, because all
        // seq_cst writes are implemented with lock-prefixed operations
        // or xchg, which has an implied lock prefix. Therefore normal loads
        // are already ordered with seq_cst stores on these architectures.
    }
};
template< std::size_t Size, bool Signed, typename Derived >
struct msvc_x86_operations :
    public msvc_x86_operations_base
{
    typedef typename make_storage_type< Size >::type storage_type;
    typedef typename make_storage_type< Size >::aligned aligned_storage_type;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        if (order != memory_order_seq_cst)
        {
            fence_before(order);
            storage = v;
            fence_after(order);
        }
        else
        {
            Derived::exchange(storage, v, order);
        }
    }
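
    // Note: the seq_cst case above is routed through exchange because a plain store
    // on x86 is not ordered with subsequent loads; xchg carries an implicit lock
    // prefix and therefore acts as a full barrier (see also fence_after_load).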
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        storage_type v = storage;
        fence_after_load(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        typedef typename boost::atomics::detail::make_signed< storage_type >::type signed_storage_type;
        return Derived::fetch_add(storage, static_cast< storage_type >(-static_cast< signed_storage_type >(v)), order);
    }
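
    // The cast through the signed type above yields the two's complement negation
    // of v, so subtraction is expressed as an addition of the negated operand and
    // only fetch_add has to be provided by Derived.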
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return !!Derived::exchange(storage, (storage_type)1, order);
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        store(storage, (storage_type)0, order);
    }
};
template< bool Signed >
struct operations< 4u, Signed > :
    public msvc_x86_operations< 4u, Signed, operations< 4u, Signed > >
{
    typedef msvc_x86_operations< 4u, Signed, operations< 4u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&storage, v));
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        storage_type previous = expected;
        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&storage, desired, previous));
        expected = old_val;
        return (previous == old_val);
    }

#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND(&storage, v));
    }
#else
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        while (!compare_exchange_strong(storage, res, res & v, order, memory_order_relaxed)) {}
        return res;
    }
#endif

#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR(&storage, v));
    }
#else
    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        while (!compare_exchange_strong(storage, res, res | v, order, memory_order_relaxed)) {}
        return res;
    }
#endif

#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&storage, v));
    }
#else
    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        while (!compare_exchange_strong(storage, res, res ^ v, order, memory_order_relaxed)) {}
        return res;
    }
#endif
};
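
// The #else branches above implement fetch_and/fetch_or/fetch_xor with the canonical
// CAS loop: read the current value, compute the updated value, and retry the CAS until
// no other thread has modified the storage in between. On failure, compare_exchange_strong
// stores the freshly observed value into res, so the loop does not need an explicit reload.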
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8)

template< bool Signed >
struct operations< 1u, Signed > :
    public msvc_x86_operations< 1u, Signed, operations< 1u, Signed > >
{
    typedef msvc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&storage, v));
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        storage_type previous = expected;
        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&storage, desired, previous));
        expected = old_val;
        return (previous == old_val);
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&storage, v));
    }
};
#elif defined(_M_IX86)

template< bool Signed >
struct operations< 1u, Signed > :
    public msvc_x86_operations< 1u, Signed, operations< 1u, Signed > >
{
    typedef msvc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edx, storage
            movzx eax, v
            lock xadd byte ptr [edx], al
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edx, storage
            movzx eax, v
            xchg byte ptr [edx], al
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
    {
        base_type::fence_before(success_order);
        bool success;
        __asm
        {
            mov esi, expected
            mov edi, storage
            movzx eax, byte ptr [esi]
            movzx edx, desired
            lock cmpxchg byte ptr [edi], dl
            mov byte ptr [esi], al
            sete success
        };
        // The success and failure fences are equivalent anyway
        base_type::fence_after(success_order);
        return success;
    }
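
    // How the asm block above works: cmpxchg compares al with the byte at [edi];
    // if they are equal it stores dl there and sets ZF, otherwise it loads the
    // observed byte into al. The observed value is then written back through the
    // expected pointer, and sete captures ZF as the boolean result.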
    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, byte ptr [edi]
            align 16
        again:
            mov dl, al
            and dl, cl
            lock cmpxchg byte ptr [edi], dl
            jne again
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, byte ptr [edi]
            align 16
        again:
            mov dl, al
            or dl, cl
            lock cmpxchg byte ptr [edi], dl
            jne again
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, byte ptr [edi]
            align 16
        again:
            mov dl, al
            xor dl, cl
            lock cmpxchg byte ptr [edi], dl
            jne again
            mov v, al
        };
        base_type::fence_after(order);
        return v;
    }
};
#else

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
{
};

#endif
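
// If neither the 8-bit interlocked intrinsics nor 32-bit inline asm are available,
// 1-byte atomics fall back to the 4-byte operations: extending_cas_based_operations
// (roughly) keeps the value in a 4-byte storage unit and implements the narrow
// arithmetic with CAS loops on top of operations< 4u >, truncating to 1 byte.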
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16)

template< bool Signed >
struct operations< 2u, Signed > :
    public msvc_x86_operations< 2u, Signed, operations< 2u, Signed > >
{
    typedef msvc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&storage, v));
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        storage_type previous = expected;
        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&storage, desired, previous));
        expected = old_val;
        return (previous == old_val);
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&storage, v));
    }
};
#elif defined(_M_IX86)

template< bool Signed >
struct operations< 2u, Signed > :
    public msvc_x86_operations< 2u, Signed, operations< 2u, Signed > >
{
    typedef msvc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edx, storage
            movzx eax, v
            lock xadd word ptr [edx], ax
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edx, storage
            movzx eax, v
            xchg word ptr [edx], ax
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order) BOOST_NOEXCEPT
    {
        base_type::fence_before(success_order);
        bool success;
        __asm
        {
            mov esi, expected
            mov edi, storage
            movzx eax, word ptr [esi]
            movzx edx, desired
            lock cmpxchg word ptr [edi], dx
            mov word ptr [esi], ax
            sete success
        };
        // The success and failure fences are equivalent anyway
        base_type::fence_after(success_order);
        return success;
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, word ptr [edi]
            align 16
        again:
            mov dx, ax
            and dx, cx
            lock cmpxchg word ptr [edi], dx
            jne again
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, word ptr [edi]
            align 16
        again:
            mov dx, ax
            or dx, cx
            lock cmpxchg word ptr [edi], dx
            jne again
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        base_type::fence_before(order);
        __asm
        {
            mov edi, storage
            movzx ecx, v
            xor edx, edx
            movzx eax, word ptr [edi]
            align 16
        again:
            mov dx, ax
            xor dx, cx
            lock cmpxchg word ptr [edi], dx
            jne again
            mov v, ax
        };
        base_type::fence_after(order);
        return v;
    }
};

#else

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< operations< 4u, Signed >, 2u, Signed >
{
};

#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)

template< bool Signed >
struct msvc_dcas_x86
{
    typedef typename make_storage_type< 8u >::type storage_type;
    typedef typename make_storage_type< 8u >::aligned aligned_storage_type;

    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;

    // Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:
    //
    // The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically:
    // * Reading or writing a quadword aligned on a 64-bit boundary
    //
    // Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64-bit native types for storage and dynamic memory allocations
    // have at least 8-byte alignment. The only unfortunate case is when the atomic is placed on the stack and is not 8-byte aligned (as on 32-bit Windows).
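    //
    // Consequently, the aligned path below uses a single 8-byte access: one SSE2
    // movq (or vmovq when AVX is enabled), or an x87 fild/fistp pair when SSE2 is
    // not available. The misaligned path falls back to lock cmpxchg8b, which is
    // atomic regardless of alignment.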
    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        storage_type volatile* p = &storage;
        if (((uint32_t)p & 0x00000007) == 0)
        {
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
            __asm
            {
                mov edx, p
                vmovq xmm4, v
                vmovq qword ptr [edx], xmm4
            };
#else
            __asm
            {
                mov edx, p
                movq xmm4, v
                movq qword ptr [edx], xmm4
            };
#endif
#else
            __asm
            {
                mov edx, p
                fild v
                fistp qword ptr [edx]
            };
#endif
        }
        else
        {
            uint32_t backup;
            __asm
            {
                mov backup, ebx
                mov edi, p
                mov ebx, dword ptr [v]
                mov ecx, dword ptr [v + 4]
                mov eax, dword ptr [edi]
                mov edx, dword ptr [edi + 4]
                align 16
            again:
                lock cmpxchg8b qword ptr [edi]
                jne again
                mov ebx, backup
            };
        }

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    }
    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        storage_type const volatile* p = &storage;
        storage_type value;

        if (((uint32_t)p & 0x00000007) == 0)
        {
#if defined(_M_IX86_FP) && _M_IX86_FP >= 2
#if defined(__AVX__)
            __asm
            {
                mov edx, p
                vmovq xmm4, qword ptr [edx]
                vmovq value, xmm4
            };
#else
            __asm
            {
                mov edx, p
                movq xmm4, qword ptr [edx]
                movq value, xmm4
            };
#endif
#else
            __asm
            {
                mov edx, p
                fild qword ptr [edx]
                fistp value
            };
#endif
        }
        else
        {
            // We don't care about the comparison result here; the previous value will be stored into value anyway.
            // Also, we don't care about the ebx and ecx values; they just have to be equal to eax and edx before cmpxchg8b.
            __asm
            {
                mov edi, p
                mov eax, ebx
                mov edx, ecx
                lock cmpxchg8b qword ptr [edi]
                mov dword ptr [value], eax
                mov dword ptr [value + 4], edx
            };
        }

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
        return value;
    }
    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        // MSVC-11 in 32-bit mode sometimes generates messed up code without compiler barriers,
        // even though the _InterlockedCompareExchange64 intrinsic already provides one.
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        storage_type volatile* p = &storage;
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
        const storage_type old_val = (storage_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected);
        const bool result = (old_val == expected);
        expected = old_val;
#else
        bool result;
        uint32_t backup;
        __asm
        {
            mov backup, ebx
            mov edi, p
            mov esi, expected
            mov ebx, dword ptr [desired]
            mov ecx, dword ptr [desired + 4]
            mov eax, dword ptr [esi]
            mov edx, dword ptr [esi + 4]
            lock cmpxchg8b qword ptr [edi]
            mov dword ptr [esi], eax
            mov dword ptr [esi + 4], edx
            mov ebx, backup
            sete result
        };
#endif

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
        return result;
    }
    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();

        storage_type volatile* p = &storage;
        uint32_t backup;
        __asm
        {
            mov backup, ebx
            mov edi, p
            mov ebx, dword ptr [v]
            mov ecx, dword ptr [v + 4]
            mov eax, dword ptr [edi]
            mov edx, dword ptr [edi + 4]
            align 16
        again:
            lock cmpxchg8b qword ptr [edi]
            jne again
            mov ebx, backup
            mov dword ptr [v], eax
            mov dword ptr [v + 4], edx
        };

        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
        return v;
    }
};
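
// cmpxchg8b only provides a compare-and-swap primitive, so the remaining operations
// (the fetch_* arithmetic, test_and_set and clear) are synthesized from
// compare_exchange_strong by the cas_based_operations wrapper below; exchange is
// provided above via a cmpxchg8b retry loop.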
template< bool Signed >
struct operations< 8u, Signed > :
    public cas_based_operations< msvc_dcas_x86< Signed > >
{
};
#elif defined(_M_AMD64)

template< bool Signed >
struct operations< 8u, Signed > :
    public msvc_x86_operations< 8u, Signed, operations< 8u, Signed > >
{
    typedef msvc_x86_operations< 8u, Signed, operations< 8u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&storage, v));
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        storage_type previous = expected;
        storage_type old_val = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&storage, desired, previous));
        expected = old_val;
        return (previous == old_val);
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&storage, v));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&storage, v));
    }
};

#endif
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)

template< bool Signed >
struct msvc_dcas_x86_64
{
    typedef typename make_storage_type< 16u >::type storage_type;
    typedef typename make_storage_type< 16u >::aligned aligned_storage_type;

    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type value = const_cast< storage_type& >(storage);
        while (!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, v, &value)) {}
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
    {
        storage_type value = storage_type();
        BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, value, &value);
        return value;
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        return !!BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE128(&storage, desired, &expected);
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }
};
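
// With cmpxchg16b every access, including load, is a read-modify-write: the load above
// is a compare-exchange that, on a successful comparison, rewrites the storage with the
// value it already holds. Hence full_cas_based is true, the storage must reside in
// writable memory, and exchange has to be synthesized as well (cas_based_exchange below).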
template< bool Signed >
struct operations< 16u, Signed > :
    public cas_based_operations< cas_based_exchange< msvc_dcas_x86_64< Signed > > >
{
};

#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
    if (order == memory_order_seq_cst)
        msvc_x86_operations_base::hardware_full_fence();
    BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
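
// Only a seq_cst fence needs a hardware barrier on x86; acquire, release and acq_rel
// fences merely have to prevent compiler reordering, which the compiler barriers
// around the check above already ensure.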
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
    if (order != memory_order_relaxed)
        BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
} // namespace detail
} // namespace atomics
} // namespace boost

#if defined(BOOST_MSVC)
#pragma warning(pop)
#endif

#endif // BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_