[BZ #2510, BZ #2830, BZ #3137, BZ #3313, BZ #3426, BZ #3465, BZ #3480, BZ #3483,...
diff --git a/sysdeps/x86_64/bits/atomic.h b/sysdeps/x86_64/bits/atomic.h
index 133a68d192ad2e701d3b7ee462706a1cf8522ca2..65d6b02008ff9d68c6f0dc3b8a5bc7ccccc3e96c 100644
--- a/sysdeps/x86_64/bits/atomic.h
+++ b/sysdeps/x86_64/bits/atomic.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -18,6 +18,7 @@
    02111-1307 USA.  */
 
 #include <stdint.h>
+#include <tls.h>       /* For tcbhead_t.  */
 
 
 typedef int8_t atomic8_t;
@@ -80,8 +81,54 @@ typedef uintmax_t uatomic_max_t;
   ({ __typeof (*mem) ret;                                                    \
      __asm __volatile (LOCK_PREFIX "cmpxchgq %q2, %1"                        \
                       : "=a" (ret), "=m" (*mem)                              \
-                      : "r" ((long) (newval)), "m" (*mem),                   \
-                        "0" ((long) (oldval)));                              \
+                      : "r" ((long int) (newval)), "m" (*mem),               \
+                        "0" ((long int) (oldval)));                          \
+     ret; })
+
+
+#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;                                                    \
+    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"                                \
+                     "je 0f\n\t"                                             \
+                     "lock\n"                                                \
+                      "0:\tcmpxchgb %b2, %1"                                 \
+                      : "=a" (ret), "=m" (*mem)                              \
+                      : "q" (newval), "m" (*mem), "0" (oldval),              \
+                        "i" (offsetof (tcbhead_t, multiple_threads)));       \
+     ret; })
+
+#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;                                                    \
+    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"                                \
+                     "je 0f\n\t"                                             \
+                     "lock\n"                                                \
+                      "0:\tcmpxchgw %w2, %1"                                 \
+                      : "=a" (ret), "=m" (*mem)                              \
+                      : "q" (newval), "m" (*mem), "0" (oldval),              \
+                        "i" (offsetof (tcbhead_t, multiple_threads)));       \
+     ret; })
+
+#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;                                                    \
+    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"                                \
+                     "je 0f\n\t"                                             \
+                     "lock\n"                                                \
+                      "0:\tcmpxchgl %2, %1"                                  \
+                      : "=a" (ret), "=m" (*mem)                              \
+                      : "q" (newval), "m" (*mem), "0" (oldval),              \
+                        "i" (offsetof (tcbhead_t, multiple_threads)));       \
+     ret; })
+
+#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;                                                    \
+     __asm __volatile ("cmpl $0, %%fs:%P5\n\t"                               \
+                      "je 0f\n\t"                                            \
+                      "lock\n"                                               \
+                      "0:\tcmpxchgq %q2, %1"                                 \
+                      : "=a" (ret), "=m" (*mem)                              \
+                      : "q" ((long int) (newval)), "m" (*mem),               \
+                        "0" ((long int)oldval),                              \
+                        "i" (offsetof (tcbhead_t, multiple_threads)));       \
      ret; })
 
 
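
The `__arch_c_compare_and_exchange_val_*_acq` macros above implement the
conditional-lock trick: `%fs` holds the thread control block on x86_64, so
`cmpl $0, %%fs:%P5` tests `tcbhead_t.multiple_threads` and, while the process
is still single-threaded, jumps over the one-byte `lock` prefix straight to
the `cmpxchg`. The instruction itself still executes, so the operation stays
atomic with respect to signal handlers on the same CPU; only the expensive
bus lock is skipped. A minimal C sketch of the semantics, with a hypothetical
`__single_threaded` flag standing in for the TCB field:

/* Sketch only, not glibc's implementation: the real macros do the
   test and the cmpxchg in one asm so the branch skips nothing but
   the one-byte lock prefix.  */
extern int __single_threaded;   /* hypothetical stand-in */

static inline int
catomic_cas_sketch (int *mem, int newval, int oldval)
{
  if (!__single_threaded)
    /* Other threads exist: need the locked variant.  */
    return __sync_val_compare_and_swap (mem, oldval, newval);
  /* Single-threaded: plain C shown for clarity; the real code still
     issues cmpxchg so the read-modify-write is one instruction.  */
  int ret = *mem;
  if (ret == oldval)
    *mem = newval;
  return ret;
}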
@@ -107,49 +154,76 @@ typedef uintmax_t uatomic_max_t;
      result; })
 
 
-#define atomic_exchange_and_add(mem, value) \
+#define __arch_exchange_and_add_body(lock, mem, value)                       \
   ({ __typeof (*mem) result;                                                 \
      if (sizeof (*mem) == 1)                                                 \
-       __asm __volatile (LOCK_PREFIX "xaddb %b0, %1"                         \
+       __asm __volatile (lock "xaddb %b0, %1"                                \
                         : "=r" (result), "=m" (*mem)                         \
-                        : "0" (value), "m" (*mem));                          \
+                        : "0" (value), "m" (*mem),                           \
+                          "i" (offsetof (tcbhead_t, multiple_threads)));     \
      else if (sizeof (*mem) == 2)                                            \
-       __asm __volatile (LOCK_PREFIX "xaddw %w0, %1"                         \
+       __asm __volatile (lock "xaddw %w0, %1"                                \
                         : "=r" (result), "=m" (*mem)                         \
-                        : "0" (value), "m" (*mem));                          \
+                        : "0" (value), "m" (*mem),                           \
+                          "i" (offsetof (tcbhead_t, multiple_threads)));     \
      else if (sizeof (*mem) == 4)                                            \
-       __asm __volatile (LOCK_PREFIX "xaddl %0, %1"                          \
+       __asm __volatile (lock "xaddl %0, %1"                                 \
                         : "=r" (result), "=m" (*mem)                         \
-                        : "0" (value), "m" (*mem));                          \
+                        : "0" (value), "m" (*mem),                           \
+                          "i" (offsetof (tcbhead_t, multiple_threads)));     \
      else                                                                    \
-       __asm __volatile (LOCK_PREFIX "xaddq %q0, %1"                         \
+       __asm __volatile (lock "xaddq %q0, %1"                                \
                         : "=r" (result), "=m" (*mem)                         \
-                        : "0" ((long) (value)), "m" (*mem));                 \
+                        : "0" ((long) (value)), "m" (*mem),                  \
+                          "i" (offsetof (tcbhead_t, multiple_threads)));     \
      result; })
 
+#define atomic_exchange_and_add(mem, value) \
+  __arch_exchange_and_add_body (LOCK_PREFIX, mem, value)
+
+#define __arch_exchange_and_add_cprefix \
+  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_exchange_and_add(mem, value) \
+  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)
+
+
+#define __arch_add_body(lock, pfx, mem, value)                               \
+  do {                                                                       \
+    if (__builtin_constant_p (value) && (value) == 1)                        \
+      pfx##_increment (mem);                                                 \
+    else if (__builtin_constant_p (value) && (value) == -1)                  \
+      pfx##_decrement (mem);                                                 \
+    else if (sizeof (*mem) == 1)                                             \
+      __asm __volatile (lock "addb %b1, %0"                                  \
+                       : "=m" (*mem)                                         \
+                       : "ir" (value), "m" (*mem),                           \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 2)                                             \
+      __asm __volatile (lock "addw %w1, %0"                                  \
+                       : "=m" (*mem)                                         \
+                       : "ir" (value), "m" (*mem),                           \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 4)                                             \
+      __asm __volatile (lock "addl %1, %0"                                   \
+                       : "=m" (*mem)                                         \
+                       : "ir" (value), "m" (*mem),                           \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else                                                                     \
+      __asm __volatile (lock "addq %q1, %0"                                  \
+                       : "=m" (*mem)                                         \
+                       : "ir" ((long) (value)), "m" (*mem),                  \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+  } while (0)
 
 #define atomic_add(mem, value) \
-  (void) ({ if (__builtin_constant_p (value) && (value) == 1)                \
-             atomic_increment (mem);                                         \
-           else if (__builtin_constant_p (value) && (value) == 1)            \
-             atomic_decrement (mem);                                         \
-           else if (sizeof (*mem) == 1)                                      \
-             __asm __volatile (LOCK_PREFIX "addb %b1, %0"                    \
-                               : "=m" (*mem)                                 \
-                               : "ir" (value), "m" (*mem));                  \
-           else if (sizeof (*mem) == 2)                                      \
-             __asm __volatile (LOCK_PREFIX "addw %w1, %0"                    \
-                               : "=m" (*mem)                                 \
-                               : "ir" (value), "m" (*mem));                  \
-           else if (sizeof (*mem) == 4)                                      \
-             __asm __volatile (LOCK_PREFIX "addl %1, %0"                     \
-                               : "=m" (*mem)                                 \
-                               : "ir" (value), "m" (*mem));                  \
-           else                                                              \
-             __asm __volatile (LOCK_PREFIX "addq %q1, %0"                    \
-                               : "=m" (*mem)                                 \
-                               : "ir" ((long) (value)), "m" (*mem));         \
-           })
+  __arch_add_body (LOCK_PREFIX, atomic, mem, value)
+
+#define __arch_add_cprefix \
+  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_add(mem, value) \
+  __arch_add_body (__arch_add_cprefix, catomic, mem, value)
 
 
 #define atomic_add_negative(mem, value) \
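
Two things are worth noting in the hunk above. First, refactoring atomic_add
into `__arch_add_body` also fixes a bug in the old code: the removed
decrement branch tested `(value) == 1` a second time instead of
`(value) == -1`, so it could never fire. Second, each conditional prefix
string hard-codes the position of the `offsetof` immediate in its asm's
operand list: `%P4` for exchange-and-add (operands 0-3 precede it) but `%P3`
for add. The `P` operand modifier makes GCC print the constant bare, without
the `$` immediate marker, so it can serve as a segment offset. A hedged usage
sketch (the counter name is illustrative):

/* Illustrative use: the catomic_* forms behave like their atomic_*
   counterparts but skip the bus lock in single-threaded processes.  */
static int counter;

void
bump (int n)
{
  catomic_add (&counter, n);
  /* xadd leaves the previous value in the register, so
     catomic_exchange_and_add returns the value before the add.  */
  int before = catomic_exchange_and_add (&counter, 1);
  (void) before;
}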
@@ -194,24 +268,37 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
 
-#define atomic_increment(mem) \
-  (void) ({ if (sizeof (*mem) == 1)                                          \
-             __asm __volatile (LOCK_PREFIX "incb %b0"                        \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem));                                \
-           else if (sizeof (*mem) == 2)                                      \
-             __asm __volatile (LOCK_PREFIX "incw %w0"                        \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem));                                \
-           else if (sizeof (*mem) == 4)                                      \
-             __asm __volatile (LOCK_PREFIX "incl %0"                         \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem));                                \
-           else                                                              \
-             __asm __volatile (LOCK_PREFIX "incq %q0"                        \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem));                                \
-           })
+#define __arch_increment_body(lock, mem) \
+  do {                                                                       \
+    if (sizeof (*mem) == 1)                                                  \
+      __asm __volatile (lock "incb %b0"                                              \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem),                                         \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 2)                                             \
+      __asm __volatile (lock "incw %w0"                                              \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem),                                         \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 4)                                             \
+      __asm __volatile (lock "incl %0"                                       \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem),                                         \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else                                                                     \
+      __asm __volatile (lock "incq %q0"                                              \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem),                                         \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+  } while (0)
+
+#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)
+
+#define __arch_increment_cprefix \
+  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_increment(mem) \
+  __arch_increment_body (__arch_increment_cprefix, mem)
 
 
 #define atomic_increment_and_test(mem) \
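
The increment variant's prefix uses `%P2` because only two operands (the
memory cell as output and input) precede the `offsetof` immediate. Expanded
for a 4-byte object, `catomic_increment` emits code along these lines (a
sketch, not verbatim compiler output; `MT_OFF` stands for the
`multiple_threads` offset):

        cmpl    $0, %fs:MT_OFF   # any other threads yet?
        je      0f               # no: jump over the prefix byte
        lock                     # one-byte prefix, falls through...
0:      incl    (%rdi)           # ...into the same incl either way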
@@ -235,24 +322,37 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
 
-#define atomic_decrement(mem) \
-  (void) ({ if (sizeof (*mem) == 1)                                          \
-             __asm __volatile (LOCK_PREFIX "decb %b0"                        \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem));                                \
-           else if (sizeof (*mem) == 2)                                      \
-             __asm __volatile (LOCK_PREFIX "decw %w0"                        \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem));                                \
-           else if (sizeof (*mem) == 4)                                      \
-             __asm __volatile (LOCK_PREFIX "decl %0"                         \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem));                                \
-           else                                                              \
-             __asm __volatile (LOCK_PREFIX "decq %q0"                        \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem));                                \
-           })
+#define __arch_decrement_body(lock, mem) \
+  do {                                                                       \
+    if (sizeof (*mem) == 1)                                                  \
+      __asm __volatile (lock "decb %b0"                                              \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem),                                         \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 2)                                             \
+      __asm __volatile (lock "decw %w0"                                              \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem),                                         \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 4)                                             \
+      __asm __volatile (lock "decl %0"                                       \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem),                                         \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else                                                                     \
+      __asm __volatile (lock "decq %q0"                                              \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem),                                         \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+  } while (0)
+
+#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)
+
+#define __arch_decrement_cprefix \
+  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_decrement(mem) \
+  __arch_decrement_body (__arch_decrement_cprefix, mem)
 
 
 #define atomic_decrement_and_test(mem) \
@@ -277,27 +377,28 @@ typedef uintmax_t uatomic_max_t;
 
 
 #define atomic_bit_set(mem, bit) \
-  (void) ({ if (sizeof (*mem) == 1)                                          \
-             __asm __volatile (LOCK_PREFIX "orb %b2, %0"                     \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem), "ir" (1L << (bit)));            \
-           else if (sizeof (*mem) == 2)                                      \
-             __asm __volatile (LOCK_PREFIX "orw %w2, %0"                     \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem), "ir" (1L << (bit)));            \
-           else if (sizeof (*mem) == 4)                                      \
-             __asm __volatile (LOCK_PREFIX "orl %2, %0"                      \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem), "ir" (1L << (bit)));            \
-           else if (__builtin_constant_p (bit) && (bit) < 32)                \
-             __asm __volatile (LOCK_PREFIX "orq %2, %0"                      \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem), "i" (1L << (bit)));             \
-           else                                                              \
-             __asm __volatile (LOCK_PREFIX "orq %q2, %0"                     \
-                               : "=m" (*mem)                                 \
-                               : "m" (*mem), "r" (1UL << (bit)));            \
-           })
+  do {                                                                       \
+    if (sizeof (*mem) == 1)                                                  \
+      __asm __volatile (LOCK_PREFIX "orb %b2, %0"                            \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem), "ir" (1L << (bit)));                    \
+    else if (sizeof (*mem) == 2)                                             \
+      __asm __volatile (LOCK_PREFIX "orw %w2, %0"                            \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem), "ir" (1L << (bit)));                    \
+    else if (sizeof (*mem) == 4)                                             \
+      __asm __volatile (LOCK_PREFIX "orl %2, %0"                             \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem), "ir" (1L << (bit)));                    \
+    else if (__builtin_constant_p (bit) && (bit) < 32)                       \
+      __asm __volatile (LOCK_PREFIX "orq %2, %0"                             \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem), "i" (1L << (bit)));                     \
+    else                                                                     \
+      __asm __volatile (LOCK_PREFIX "orq %q2, %0"                            \
+                       : "=m" (*mem)                                         \
+                       : "m" (*mem), "r" (1UL << (bit)));                    \
+  } while (0)
 
 
 #define atomic_bit_test_set(mem, bit) \
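
atomic_bit_set (and atomic_and below) changes only in form: the GNU
statement expression `(void) ({ ... })` becomes the conventional
`do { ... } while (0)` wrapper, which behaves like a single statement
everywhere a macro call may legally appear. The classic case it protects is
an un-braced if/else, as in this sketch (names illustrative):

/* With a bare { ... } block instead of do { ... } while (0), the
   semicolon after the macro call would end the if statement and
   orphan the else.  */
if (need_flag)
  atomic_bit_set (&flags, 3);
else
  atomic_bit_set (&flags, 4);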
@@ -322,3 +423,56 @@ typedef uintmax_t uatomic_max_t;
 
 
 #define atomic_delay() asm ("rep; nop")
+
+
+#define atomic_and(mem, mask) \
+  do {                                                                       \
+    if (sizeof (*mem) == 1)                                                  \
+      __asm __volatile (LOCK_PREFIX "andb %1, %b0"                           \
+                       : "=m" (*mem)                                         \
+                       : "ir" (mask), "m" (*mem));                           \
+    else if (sizeof (*mem) == 2)                                             \
+      __asm __volatile (LOCK_PREFIX "andw %1, %w0"                           \
+                       : "=m" (*mem)                                         \
+                       : "ir" (mask), "m" (*mem));                           \
+    else if (sizeof (*mem) == 4)                                             \
+      __asm __volatile (LOCK_PREFIX "andl %1, %0"                            \
+                       : "=m" (*mem)                                         \
+                       : "ir" (mask), "m" (*mem));                           \
+    else                                                                     \
+      __asm __volatile (LOCK_PREFIX "andq %1, %q0"                           \
+                       : "=m" (*mem)                                         \
+                       : "ir" (mask), "m" (*mem));                           \
+  } while (0)
+
+
+#define __arch_or_body(lock, mem, mask)                                      \
+  do {                                                                       \
+    if (sizeof (*mem) == 1)                                                  \
+      __asm __volatile (lock "orb %1, %b0"                                   \
+                       : "=m" (*mem)                                         \
+                       : "ir" (mask), "m" (*mem),                            \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 2)                                             \
+      __asm __volatile (lock "orw %1, %w0"                                   \
+                       : "=m" (*mem)                                         \
+                       : "ir" (mask), "m" (*mem),                            \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 4)                                             \
+      __asm __volatile (lock "orl %1, %0"                                    \
+                       : "=m" (*mem)                                         \
+                       : "ir" (mask), "m" (*mem),                            \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else                                                                     \
+      __asm __volatile (lock "orq %1, %q0"                                   \
+                       : "=m" (*mem)                                         \
+                       : "ir" (mask), "m" (*mem),                            \
+                         "i" (offsetof (tcbhead_t, multiple_threads)));      \
+  } while (0)
+
+#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
+
+#define __arch_or_cprefix \
+  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)
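
In sum, the patch introduces catomic_ (conditionally locked) variants of
compare-and-exchange, exchange-and-add, add, increment, decrement, and or;
atomic_and gets the new do/while shape but no conditional variant in this
change. The following self-contained program shows the whole trick in
isolation, with a plain global standing in for glibc's TCB field reached
through `%fs` (all names here are illustrative, not glibc's):

#include <stdio.h>

/* Stand-in for THREAD_SELF->header.multiple_threads, which glibc
   reads through the %fs segment base.  */
static int multiple_threads = 0;

static inline void
cond_lock_increment (int *mem)
{
  __asm __volatile ("cmpl $0, %1\n\t"
                    "je 0f\n\t"
                    "lock\n"        /* one-byte prefix...              */
                    "0:\tincl %0"   /* ...skipped when single-threaded */
                    : "=m" (*mem)
                    : "m" (multiple_threads), "m" (*mem));
}

int
main (void)
{
  int counter = 41;
  cond_lock_increment (&counter);
  printf ("%d\n", counter);   /* prints 42 */
  return 0;
}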