#ifndef ATOMIC_X86_GCC_INCLUDED
#define ATOMIC_X86_GCC_INCLUDED

/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

/*
  XXX 64-bit atomic operations can be implemented using
  cmpxchg8b, if necessary, though reportedly not all 64-bit
  architectures support double-word (128-bit) CAS.
*/

/*
  No special support for 8- and 16-bit operations is implemented
  here currently.
*/
#undef MY_ATOMIC_HAS_8_AND_16

#ifdef __x86_64__
#  ifdef MY_ATOMIC_NO_XADD
#    define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix "-no-xadd"
#  else
#    define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix
#  endif
#else
#  ifdef MY_ATOMIC_NO_XADD
#    define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix "-no-xadd"
#  else
#    define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix
#  endif
#endif

/* fix -ansi errors while maintaining readability */
#ifndef asm
#define asm __asm__
#endif

#ifndef MY_ATOMIC_NO_XADD
#define make_atomic_add_body(S)   make_atomic_add_body ## S
#define make_atomic_cas_body(S)   make_atomic_cas_body ## S
#endif

#define make_atomic_add_body32                                  \
  asm volatile (LOCK_prefix "; xadd %0, %1;"                    \
                : "+r" (v), "=m" (*a)                           \
                : "m" (*a)                                      \
                : "memory")
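
/*
  Illustration only, never compiled: a hedged C sketch of what the
  locked xadd above performs as a single atomic step. The function
  name is a hypothetical stand-in for the wrapper that my_atomic.h
  generates from this body.
*/
#if 0
static int32 atomic_add32_sketch(int32 volatile *a, int32 v)
{
  int32 old= *a;     /* fetch the current value                 */
  *a= old + v;       /* add; xadd does both steps atomically    */
  return old;        /* the caller receives the pre-add value   */
}
#endif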

#define make_atomic_cas_body32                                  \
  __typeof__(*cmp) sav;                                         \
  asm volatile (LOCK_prefix "; cmpxchg %3, %0; setz %2;"        \
                : "=m" (*a), "=a" (sav), "=q" (ret)             \
                : "r" (set), "m" (*a), "a" (*cmp)               \
                : "memory");                                    \
  if (!ret)                                                     \
    *cmp= sav
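
/*
  Illustration only, never compiled: the contract the locked cmpxchg
  above implements, as a hedged C sketch. On failure the observed
  value is stored back through cmp, which is what the sav/setz dance
  achieves. The function name is hypothetical.
*/
#if 0
static int atomic_cas32_sketch(int32 volatile *a, int32 *cmp, int32 set)
{
  if (*a == *cmp)    /* compare, atomic together with the exchange */
  {
    *a= set;         /* equal: install the new value               */
    return 1;        /* setz sets ret on success                   */
  }
  *cmp= *a;          /* not equal: report the value actually seen  */
  return 0;
}
#endif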

#ifdef __x86_64__
#define make_atomic_add_body64 make_atomic_add_body32
#define make_atomic_cas_body64 make_atomic_cas_body32

#define make_atomic_fas_body(S)                                 \
  asm volatile ("xchg %0, %1;"                                  \
                : "+r" (v), "=m" (*a)                           \
                : "m" (*a)                                      \
                : "memory")

/*
  32/64-bit reads and writes are, in fact, always atomic on x86_64;
  nonetheless, issue memory barriers as appropriate.
*/
#define make_atomic_load_body(S)                                \
  /* Serialize prior load and store operations. */              \
  asm volatile ("mfence" ::: "memory");                         \
  ret= *a;                                                      \
  /* Prevent compiler from reordering instructions. */          \
  asm volatile ("" ::: "memory")
#define make_atomic_store_body(S)                               \
  asm volatile ("; xchg %0, %1;"                                \
                : "=m" (*a), "+r" (v)                           \
                : "m" (*a)                                      \
                : "memory")
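
/*
  Illustration only, never compiled: xchg with a memory operand is
  implicitly locked on x86, so the store body above also acts as a
  full memory barrier. A hedged, roughly equivalent alternative
  (shown for the 32-bit case, with a hypothetical function name) is
  a plain store followed by an explicit fence:
*/
#if 0
static void atomic_store32_sketch(int32 volatile *a, int32 v)
{
  *a= v;                                 /* plain store         */
  asm volatile ("mfence" ::: "memory");  /* then a full barrier */
}
#endif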

#else
/*
  Use the default implementations for the 64-bit operations: having
  solved the 64-bit problem on 32-bit platforms for CAS, there is no
  need to solve it again for ADD, LOAD, STORE and FAS. Since add32
  support is defined above, add64 must be defined here as well, but
  fas, load and store are not defined at all, so they can fall back
  on the default implementations.
*/
#define make_atomic_add_body64                                  \
  int64 tmp= *a;                                                \
  while (!my_atomic_cas64(a, &tmp, tmp+v)) ;                    \
  v= tmp;
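
/*
  Illustration only, never compiled: why the loop above converges.
  On failure my_atomic_cas64() refreshes tmp with the value currently
  in *a, so each retry recomputes tmp+v from fresh data until the CAS
  succeeds; the pre-add value ends up in v. Hedged sketch, with a
  hypothetical function name:
*/
#if 0
static int64 atomic_add64_sketch(int64 volatile *a, int64 v)
{
  int64 tmp= *a;                              /* initial guess       */
  while (!my_atomic_cas64(a, &tmp, tmp + v))
    ;                                         /* tmp refreshed to *a */
  return tmp;                                 /* pre-add value       */
}
#endif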

/*
  On some platforms (e.g. Mac OS X and Solaris) the ebx register is
  used as a pointer to the global offset table, so we are not allowed
  to clobber the b-register on those platforms when compiling PIC
  code; to avoid this we push and pop ebx around the cmpxchg8b. The
  new value is loaded directly from memory to avoid problems with an
  implicit manipulation of the stack pointer by the push.

  cmpxchg8b works on both 32-bit and 64-bit platforms, but the code
  here is only used on 32-bit platforms; on 64-bit platforms the much
  simpler make_atomic_cas_body32 works fine.
*/
#define make_atomic_cas_body64                                  \
  asm volatile ("push %%ebx;"                                   \
                "movl (%%ecx), %%ebx;"                          \
                "movl 4(%%ecx), %%ecx;"                         \
                LOCK_prefix "; cmpxchg8b (%%esi);"              \
                "setz %2; pop %%ebx"                            \
                : "+S" (a), "+A" (*cmp), "=c" (ret)             \
                : "c" (&set)                                    \
                : "memory", "esp")
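
/*
  Illustration only, never compiled: a hedged reading of the register
  choreography above. cmpxchg8b compares the 8 bytes at (%esi)
  against edx:eax (the "+A" operand, i.e. *cmp) and, if equal, stores
  ecx:ebx (loaded from &set after the push) -- the same contract as
  make_atomic_cas_body32, only for a 64-bit operand on a 32-bit
  platform. The function name is hypothetical.
*/
#if 0
static int atomic_cas64_sketch(int64 volatile *a, int64 *cmp, int64 set)
{
  if (*a == *cmp)    /* cmpxchg8b compares 8 bytes at (%esi)     */
  {
    *a= set;         /* equal: ecx:ebx is written to memory      */
    return 1;        /* setz -> ret= 1                           */
  }
  *cmp= *a;          /* not equal: current value left in edx:eax */
  return 0;
}
#endif  /* illustration only */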
#endif

/*
  The implementation of make_atomic_cas_body32 adapts to the operand
  word size, so on 64-bit platforms it automatically operates on
  64-bit (pointer-sized) values and therefore also serves as the
  pointer CAS.
*/
#define make_atomic_cas_bodyptr make_atomic_cas_body32

#ifdef MY_ATOMIC_MODE_DUMMY
#define make_atomic_load_body(S)   ret= *a
#define make_atomic_store_body(S)  *a= v
#endif

#endif /* ATOMIC_X86_GCC_INCLUDED */