git.ipfire.org Git - thirdparty/bind9.git/commitdiff
Fix data race in RCU pointer exchange operations
author: Ondřej Surý <ondrej@sury.org>
Thu, 19 Mar 2026 06:30:24 +0000 (07:30 +0100)
committer: Ondřej Surý <ondrej@sury.org>
Thu, 19 Mar 2026 07:10:22 +0000 (08:30 +0100 committed 07:10:22 UTC)
The liburcu rcu_cmpxchg_pointer() uses CMM_RELAXED ordering on the CAS
failure path.  When a thread loses the CAS and gets another thread's
pointer back, reading fields through that pointer is a data race on
weakly-ordered architectures (ARM, POWER) because the failing load has
no acquire semantics.

Override rcu_cmpxchg_pointer() and rcu_xchg_pointer() to use standard
__atomic builtins with __ATOMIC_ACQ_REL (success) and __ATOMIC_ACQUIRE
(failure) ordering.  This fixes the race on all architectures and is
natively visible to ThreadSanitizer.

lib/isc/include/isc/urcu.h

index fc34fcf2653ab2755702d2e306601d2e9b119ab4..20c2902d43ec8afd8b3bf816737651e2446a3948 100644 (file)
 #endif /* !defined(caa_container_of_check_null) */
 /* clang-format on */
 
+/*
+ * Override rcu_cmpxchg_pointer and rcu_xchg_pointer to use acquire/release
+ * ordering.  The liburcu defaults use CMM_RELAXED on the CAS failure path,
+ * which means the returned pointer has no ordering guarantees -- reading
+ * fields through it is a data race on weakly-ordered architectures.
+ *
+ * Using __ATOMIC_ACQ_REL for success (release for the publisher, acquire
+ * for the next reader) and __ATOMIC_ACQUIRE for failure (so the losing
+ * thread sees the winner's writes) fixes this and is also natively visible
+ * to ThreadSanitizer.
+ */
+#undef rcu_cmpxchg_pointer
+/*
+ * Atomically: if *p == old, store _new.  Evaluates to the pointer that was
+ * in *p before the operation -- i.e. `old` when the CAS succeeded, or the
+ * value some other thread installed when it failed (cmpxchg semantics).
+ * __atomic_compare_exchange_n writes the observed value back into ___old
+ * on failure, which is why returning ___old covers both outcomes; the
+ * (void) cast deliberately discards the success/failure bool.
+ */
+#define rcu_cmpxchg_pointer(p, old, _new)                                      \
+       __extension__({                                                        \
+               __typeof__(*(p)) ___old = (old);                               \
+               (void)__atomic_compare_exchange_n((p), &___old, (_new), false, \
+                                                 __ATOMIC_ACQ_REL,            \
+                                                 __ATOMIC_ACQUIRE);           \
+               ___old;                                                        \
+       })
+
+#undef rcu_xchg_pointer
+/* Unconditional swap: store v into *p, evaluate to the previous pointer. */
+#define rcu_xchg_pointer(p, v) __atomic_exchange_n((p), (v), __ATOMIC_ACQ_REL)
+
 #ifdef __SANITIZE_THREAD__
 
 /*
 #undef _CMM_STORE_SHARED
 #define _CMM_STORE_SHARED(x, v) CMM_STORE_SHARED(x, v)
 
-#endif
+#endif /* __SANITIZE_THREAD__ */