git.ipfire.org Git - thirdparty/openssl.git/commitdiff
rcu: Ensure that updates to the ID field of a qp don't lose refs
authorNeil Horman <nhorman@openssl.org>
Sun, 19 Jan 2025 21:43:37 +0000 (16:43 -0500)
committerTomas Mraz <tomas@openssl.org>
Wed, 22 Jan 2025 19:16:11 +0000 (20:16 +0100)
ppc64le occasionally still fails the threadstest on __rcu_torture

From several days of debugging, I think I've landed on the problem.

Occasionally, under high load I observe the following pattern

CPU0                                     CPU1
update_qp                                get_hold_current_qp
  atomic_and_fetch(qp->users, ID_MASK, RELEASE)
                                         atomic_add_fetch(qp->users, 1, RELEASE)
  atomic_or_fetch(qp->users, ID_VAL++, RELEASE)

When this pattern occurs, the atomic or operation fails to see the published
value of CPU1 and when the or-ed value is written back to ram, the incremented
value in get_hold_current_qp is overwritten, meaning the hold that the reader
placed on the rcu lock is lost, allowing the writer to complete early, freeing
memory before a reader is done reading any held memory.

Why this is only observed on ppc64le I'm not sure, but it seems like a pretty
clear problem.

fix it by implementing ATOMIC_COMPARE_EXCHANGE_N, so that, on the write side in
update_qp, we can ensure that updates are only done if the read side hasn't
changed anything.  If it has, retry the operation.

With this fix, I'm able to run the threads test overnight (4000 iterations and
counting) without failure.

Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/26478)

crypto/threads_pthread.c

index 4e8df06207bb1273df704efe9961472854b44e30..f33014e527322d25879c636436961f05e8fa5fa1 100644 (file)
@@ -129,6 +129,7 @@ static inline void *apple_atomic_load_n_pvoid(void **p,
 #  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
 #  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
 #  define ATOMIC_EXCHANGE_N(t, p, v, o) __atomic_exchange_n(p, v, o)
+#  define ATOMIC_COMPARE_EXCHANGE_N(t, p, e, d, s, f) __atomic_compare_exchange_n(p, e, d, 0, s, f)
 #  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
 #  define ATOMIC_FETCH_ADD(p, v, o) __atomic_fetch_add(p, v, o)
 #  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
@@ -197,6 +198,23 @@ IMPL_fallback_atomic_exchange_n(prcu_cb_item)
 
 #  define ATOMIC_EXCHANGE_N(t, p, v, o) fallback_atomic_exchange_n_##t(p, v)
 
+#  define IMPL_fallback_atomic_compare_exchange_n(t)                                  \
+    static ossl_inline int fallback_atomic_compare_exchange_n_##t(t *p, t *e, t d, int s, int f) \
+    {                                                                                 \
+        int ret = 1;                                                                 \
+        pthread_mutex_lock(&atomic_sim_lock);                                         \
+        if (*p == *e)                                                                 \
+            *p = d;                                                                    \
+        else                                                                          \
+            ret = 0;                                                                   \
+        pthread_mutex_unlock(&atomic_sim_lock);                                       \
+        return ret;                                                                   \
+    }
+
+IMPL_fallback_atomic_compare_exchange_n(uint64_t)
+
+#  define ATOMIC_COMPARE_EXCHANGE_N(t, p, e, d, s, f) fallback_atomic_compare_exchange_n_##t(p, e, d, s, f)
+
 /*
  * The fallbacks that follow don't need any per type implementation, as
  * they are designed for uint64_t only.  If there comes a time when multiple
@@ -506,6 +524,8 @@ void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
 static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
 {
     uint64_t new_id;
+    uint64_t update;
+    uint64_t ret;
     uint32_t current_idx;
 
     pthread_mutex_lock(&lock->alloc_lock);
@@ -538,10 +558,13 @@ static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
      * of this update are published to the read side prior to updating the
      * reader idx below
      */
-    ATOMIC_AND_FETCH(&lock->qp_group[current_idx].users, ID_MASK,
-                     __ATOMIC_RELEASE);
-    ATOMIC_OR_FETCH(&lock->qp_group[current_idx].users, new_id,
-                    __ATOMIC_RELEASE);
+try_again:
+    ret = ATOMIC_LOAD_N(uint64_t, &lock->qp_group[current_idx].users, __ATOMIC_ACQUIRE);
+    update = ret & ID_MASK;
+    update |= new_id;
+    if (!ATOMIC_COMPARE_EXCHANGE_N(uint64_t, &lock->qp_group[current_idx].users, &ret, update,
+                                   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
+        goto try_again;
 
     /*
      * Update the reader index to be the prior qp.