@@ ... @@
 #define __ha_barrier_full() do { } while (0)
 #define __ha_compiler_barrier() do { } while (0)
 #define __ha_cpu_relax() ({ 1; })
+#define __ha_cpu_relax_for_read() ({ 1; })
 
 #else /* !USE_THREAD */
@@ ... @@
 /* short-lived CPU relaxation */
 #define __ha_cpu_relax() ({ asm volatile("rep;nop\n"); 1; })
+/* dummy relaxation: x86 prefers not to wait at all in read loops */
+#define __ha_cpu_relax_for_read() ({ 1; })
+
 #elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))
 
 static __inline void
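The x86 hunk above is the interesting one: PAUSE ("rep;nop") deliberately stalls the pipeline for tens of cycles (reportedly far more on some recent microarchitectures), which is what a contended-write spin loop wants but is pure added latency when a reader only needs to re-check a value that has most likely already changed, hence the read variant is a no-op there. Below is a minimal sketch of the kind of read loop this targets, assuming a hypothetical seqlock-style counter; only __ha_cpu_relax_for_read() comes from the patch:

    #ifndef __ha_cpu_relax_for_read
    #define __ha_cpu_relax_for_read() ({ 1; }) /* fallback so this sketch builds standalone */
    #endif

    /* hypothetical reader: spin while a writer holds the sequence at an
     * odd value, then return the stable (even) sequence; on x86 the
     * relax is a no-op so the re-read happens immediately, while on
     * aarch64 it expands to a real "isb" pause */
    static unsigned wait_for_stable_seq(const volatile unsigned *seq)
    {
        unsigned s;

        while ((s = *seq) & 1)
            __ha_cpu_relax_for_read();
        return s;
    }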
@@ ... @@
 /* short-lived CPU relaxation */
 #define __ha_cpu_relax() ({ asm volatile(""); 1; })
+/* short wait in read loops */
+#define __ha_cpu_relax_for_read() ({ asm volatile(""); 1; })
+
 #elif defined (__aarch64__)
 
 static __inline void
@@ ... @@
  */
 #define __ha_cpu_relax() ({ asm volatile("isb" ::: "memory"); 1; })
+/* aarch64 prefers to wait for real in read loops */
+#define __ha_cpu_relax_for_read() ({ asm volatile("isb" ::: "memory"); 1; })
+
 #if defined(__ARM_FEATURE_ATOMICS) && !defined(__clang__) // ARMv8.1-A atomics
 /* returns 0 on failure, non-zero on success */
@@ ... @@
 /* short-lived CPU relaxation */
 #define __ha_cpu_relax() ({ asm volatile(""); 1; })
+/* default wait in read loops */
+#define __ha_cpu_relax_for_read() ({ asm volatile(""); 1; })
+
 #endif /* end of arch-specific barrier/dwcas */
 
 static inline void __ha_compiler_barrier(void)
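One call-site note: both macros are GNU statement expressions that evaluate to 1, so they can sit directly in a loop condition rather than in the loop body. A usage sketch under that assumption; the flag and helper are hypothetical, not from the patch:

    #ifndef __ha_cpu_relax_for_read
    #define __ha_cpu_relax_for_read() ({ 1; }) /* fallback so this sketch builds standalone */
    #endif

    /* hypothetical helper: spin until *flag becomes non-zero; the macro
     * returns 1, so it keeps the condition true while waiting */
    static void wait_ready(const volatile int *flag)
    {
        while (!*flag && __ha_cpu_relax_for_read())
            ;
    }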