git.ipfire.org Git - thirdparty/glibc.git/commitdiff
cheri: Implement 128-bit atomics
author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Tue, 8 Jun 2021 11:48:43 +0000 (12:48 +0100)
committer: Szabolcs Nagy <szabolcs.nagy@arm.com>
Fri, 5 Aug 2022 18:45:19 +0000 (19:45 +0100)
Arm Morello requires 128-bit atomics.

include/atomic.h
sysdeps/aarch64/atomic-machine.h

index 2cb52c9cfd894308b97b97a04dd574b2287bf1b2..140ef2a5a5c7f71305650f22b9767a6632444963 100644 (file)
@@ -62,6 +62,8 @@
       __atg1_result = pre##_32_##post (mem, __VA_ARGS__);                    \
     else if (sizeof (*mem) == 8)                                             \
       __atg1_result = pre##_64_##post (mem, __VA_ARGS__);                    \
+    else if (sizeof (*mem) == 16)                                            \
+      __atg1_result = pre##_128_##post (mem, __VA_ARGS__);                   \
     else                                                                     \
       abort ();                                                                      \
     __atg1_result;                                                           \
@@ -77,6 +79,8 @@
       __atg2_result = pre##_32_##post (mem, __VA_ARGS__);                    \
     else if (sizeof (*mem) == 8)                                             \
       __atg2_result = pre##_64_##post (mem, __VA_ARGS__);                    \
+    else if (sizeof (*mem) == 16)                                            \
+      __atg2_result = pre##_128_##post (mem, __VA_ARGS__);                   \
     else                                                                     \
       abort ();                                                                      \
     __atg2_result;                                                           \
 /* We require 32b atomic operations; some archs also support 64b atomic
    operations.  */
 void __atomic_link_error (void);
-# if __HAVE_64B_ATOMICS == 1
+# if defined __CHERI_PURE_CAPABILITY__
+#  define __atomic_check_size(mem) \
+   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8) && (sizeof (*mem) != 16)) \
+     __atomic_link_error ();
+# elif __HAVE_64B_ATOMICS == 1
 #  define __atomic_check_size(mem) \
    if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8))                         \
      __atomic_link_error ();
@@ -553,7 +561,12 @@ void __atomic_link_error (void);
    need other atomic operations of such sizes, and restricting the support to
    loads and stores makes this easier for archs that do not have native
    support for atomic operations to less-than-word-sized data.  */
-# if __HAVE_64B_ATOMICS == 1
+# if defined __CHERI_PURE_CAPABILITY__
+#  define __atomic_check_size_ls(mem) \
+   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4)   \
+       && (sizeof (*mem) != 8) && (sizeof (*mem) != 16))                     \
+     __atomic_link_error ();
+# elif __HAVE_64B_ATOMICS == 1
 #  define __atomic_check_size_ls(mem) \
    if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4)   \
        && (sizeof (*mem) != 8))                                                      \
index 52b3fb2047cbf2518644b1595ed9a11cddcc89d2..14e948139286b09d8ef8184e3e0fd201d56422d2 100644 (file)
                                  model, __ATOMIC_RELAXED);             \
   })
 
+#  define __arch_compare_and_exchange_bool_128_int(mem, newval, oldval, model) \
+  ({                                                                   \
+    typeof (*mem) __oldval = (oldval);                                 \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,  \
+                                 model, __ATOMIC_RELAXED);             \
+  })
+
 # define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
   ({                                                                   \
     typeof (*mem) __oldval = (oldval);                                 \
     __oldval;                                                          \
   })
 
+#  define __arch_compare_and_exchange_val_128_int(mem, newval, oldval, model) \
+  ({                                                                   \
+    typeof (*mem) __oldval = (oldval);                                 \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,   \
+                                model, __ATOMIC_RELAXED);              \
+    __oldval;                                                          \
+  })
+
 
 /* Compare and exchange with "acquire" semantics, ie barrier after.  */
 
 #  define __arch_exchange_64_int(mem, newval, model)   \
   __atomic_exchange_n (mem, newval, model)
 
+#  define __arch_exchange_128_int(mem, newval, model)  \
+  __atomic_exchange_n (mem, newval, model)
+
 # define atomic_exchange_acq(mem, value)                               \
   __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
 
 #  define __arch_exchange_and_add_64_int(mem, value, model)    \
   __atomic_fetch_add (mem, value, model)
 
+#  define __arch_exchange_and_add_128_int(mem, value, model)   \
+  __atomic_fetch_add (mem, value, model)
+
 # define atomic_exchange_and_add_acq(mem, value)                       \
   __atomic_val_bysize (__arch_exchange_and_add, int, mem, value,       \
                       __ATOMIC_ACQUIRE)