From: Ondřej Surý Date: Thu, 19 Mar 2026 06:30:24 +0000 (+0100) Subject: Fix data race in RCU pointer exchange operations X-Git-Tag: v9.21.21~25^2 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9457f4f8c5259d4ece0eb47225075d5e885701ba;p=thirdparty%2Fbind9.git Fix data race in RCU pointer exchange operations The liburcu rcu_cmpxchg_pointer() uses CMM_RELAXED ordering on the CAS failure path. When a thread loses the CAS and gets another thread's pointer back, reading fields through that pointer is a data race on weakly-ordered architectures (ARM, POWER) because the failing load has no acquire semantics. Override rcu_cmpxchg_pointer() and rcu_xchg_pointer() to use standard __atomic builtins with __ATOMIC_ACQ_REL (success) and __ATOMIC_ACQUIRE (failure) ordering. This fixes the race on all architectures and is natively visible to ThreadSanitizer. --- diff --git a/lib/isc/include/isc/urcu.h b/lib/isc/include/isc/urcu.h index fc34fcf2653..20c2902d43e 100644 --- a/lib/isc/include/isc/urcu.h +++ b/lib/isc/include/isc/urcu.h @@ -142,6 +142,30 @@ #endif /* !defined(caa_container_of_check_null) */ /* clang-format on */ +/* + * Override rcu_cmpxchg_pointer and rcu_xchg_pointer to use acquire/release + * ordering. The liburcu defaults use CMM_RELAXED on the CAS failure path, + * which means the returned pointer has no ordering guarantees — reading + * fields through it is a data race on weakly-ordered architectures. + * + * Using __ATOMIC_ACQ_REL for success (release for the publisher, acquire + * for the next reader) and __ATOMIC_ACQUIRE for failure (so the losing + * thread sees the winner's writes) fixes this and is also natively visible + * to ThreadSanitizer.
+ */ +#undef rcu_cmpxchg_pointer +#define rcu_cmpxchg_pointer(p, old, _new) \ + __extension__({ \ + __typeof__(*(p)) ___old = (old); \ + (void)__atomic_compare_exchange_n((p), &___old, (_new), false, \ + __ATOMIC_ACQ_REL, \ + __ATOMIC_ACQUIRE); \ + ___old; \ + }) + +#undef rcu_xchg_pointer +#define rcu_xchg_pointer(p, v) __atomic_exchange_n((p), (v), __ATOMIC_ACQ_REL) + #ifdef __SANITIZE_THREAD__ /* @@ -169,4 +193,4 @@ #undef _CMM_STORE_SHARED #define _CMM_STORE_SHARED(x, v) CMM_STORE_SHARED(x, v) -#endif +#endif /* __SANITIZE_THREAD__ */