From: Wilco Dijkstra
Date: Tue, 9 Sep 2025 10:29:55 +0000 (+0000)
Subject: atomic: Switch hppa to builtin atomics
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=9c72e0193db60096d7ca9aa2cc2c5d1369c2b7dd;p=thirdparty%2Fglibc.git

atomic: Switch hppa to builtin atomics

Switch hppa to the compiler's builtin atomics by defining
USE_ATOMIC_COMPILER_BUILTINS to 1 and removing the hand-written
light-weight-syscall (LWS) compare-and-exchange.

Reviewed-by: Adhemerval Zanella
---

diff --git a/sysdeps/unix/sysv/linux/hppa/atomic-machine.h b/sysdeps/unix/sysv/linux/hppa/atomic-machine.h
index 85b820ddd6..0e2f6e441e 100644
--- a/sysdeps/unix/sysv/linux/hppa/atomic-machine.h
+++ b/sysdeps/unix/sysv/linux/hppa/atomic-machine.h
@@ -21,84 +21,9 @@
 #define atomic_full_barrier() __sync_synchronize ()
 
 #define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
-
-/* We use the compiler atomic load and store builtins as the generic
-   defines are not atomic.  In particular, we need to use compare and
-   exchange for stores as the implementation is synthesized.  */
-void __atomic_link_error (void);
-#define __atomic_check_size_ls(mem) \
- if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && sizeof (*mem) != 4) \
-   __atomic_link_error ();
-
-#define atomic_load_relaxed(mem) \
- ({ __atomic_check_size_ls((mem)); \
-    __atomic_load_n ((mem), __ATOMIC_RELAXED); })
-#define atomic_load_acquire(mem) \
- ({ __atomic_check_size_ls((mem)); \
-    __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })
-
-#define atomic_store_relaxed(mem, val) \
- do { \
-   __atomic_check_size_ls((mem)); \
-   __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
- } while (0)
-#define atomic_store_release(mem, val) \
- do { \
-   __atomic_check_size_ls((mem)); \
-   __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
- } while (0)
+#define USE_ATOMIC_COMPILER_BUILTINS 1
 
 /* XXX Is this actually correct?  */
 #define ATOMIC_EXCHANGE_USES_CAS 1
 
-/* prev = *addr;
-   if (prev == old)
-     *addr = new;
-   return prev; */
-
-/* Use the kernel atomic light weight syscalls on hppa.  */
-#define _LWS "0xb0"
-#define _LWS_CAS "0"
-/* Note r31 is the link register.  */
-#define _LWS_CLOBBER "r1", "r23", "r22", "r20", "r31", "memory"
-/* String constant for -EAGAIN.  */
-#define _ASM_EAGAIN "-11"
-/* String constant for -EDEADLOCK.  */
-#define _ASM_EDEADLOCK "-45"
-
-/* The only basic operation needed is compare and exchange.  The mem
-   pointer must be word aligned.  We no longer loop on deadlock.  */
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
-  ({ \
-     register long lws_errno asm("r21"); \
-     register unsigned long lws_ret asm("r28"); \
-     register unsigned long lws_mem asm("r26") = (unsigned long)(mem); \
-     register unsigned long lws_old asm("r25") = (unsigned long)(oldval); \
-     register unsigned long lws_new asm("r24") = (unsigned long)(newval); \
-     __asm__ __volatile__( \
-	"0:					\n\t" \
-	"ble	" _LWS "(%%sr2, %%r0)		\n\t" \
-	"ldi	" _LWS_CAS ", %%r20		\n\t" \
-	"cmpiclr,<> " _ASM_EAGAIN ", %%r21, %%r0\n\t" \
-	"b,n 0b					\n\t" \
-	"cmpclr,= %%r0, %%r21, %%r0		\n\t" \
-	"iitlbp %%r0,(%%sr0, %%r0)		\n\t" \
-	: "=r" (lws_ret), "=r" (lws_errno) \
-	: "r" (lws_mem), "r" (lws_old), "r" (lws_new) \
-	: _LWS_CLOBBER \
-     ); \
- \
-     (__typeof (oldval)) lws_ret; \
-   })
-
-#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
-  ({ \
-     __typeof__ (*mem) ret; \
-     ret = atomic_compare_and_exchange_val_acq(mem, newval, oldval); \
-     /* Return 1 if it was already acquired.  */ \
-     (ret != oldval); \
-   })
-
 #endif
-/* _ATOMIC_MACHINE_H */
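
With USE_ATOMIC_COMPILER_BUILTINS set to 1, the generic atomic macros
expand to GCC's __atomic builtins, which the hppa-linux toolchain in
turn implements on top of the same kernel light-weight-syscall helper,
so the removed assembly is no longer needed in this header.  As a
minimal, self-contained sketch (not glibc code; cas_val_acq is a
hypothetical stand-in), the contract of the removed
atomic_compare_and_exchange_val_acq maps onto
__atomic_compare_exchange_n like this:

#include <stdio.h>

/* Hypothetical stand-in for the removed macro: store NEWVAL into *MEM
   only if the previous value equals OLDVAL (acquire semantics on
   success), and return the previous value either way.  */
static unsigned int
cas_val_acq (unsigned int *mem, unsigned int newval, unsigned int oldval)
{
  unsigned int prev = oldval;
  /* On failure the builtin writes the value actually observed into
     PREV; on success PREV keeps OLDVAL.  Either way PREV holds the
     previous contents of *MEM, matching the old contract
     "prev = *addr; if (prev == old) *addr = new; return prev;".  */
  __atomic_compare_exchange_n (mem, &prev, newval, 0,
			       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  return prev;
}

int
main (void)
{
  unsigned int x = 1;
  unsigned int prev = cas_val_acq (&x, 2, 1);
  printf ("prev=%u x=%u\n", prev, x);	/* prev=1 x=2: exchange done.  */
  prev = cas_val_acq (&x, 5, 1);
  printf ("prev=%u x=%u\n", prev, x);	/* prev=2 x=2: exchange refused.  */
  return 0;
}

The removed bool variant then reduces to (prev != oldval), i.e. the
inverse of the builtin's success flag, and the -EAGAIN retry loop and
operand-size checks presumably move out of this header and into the
compiler/libgcc support rather than being open-coded here.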