powerpc/qspinlock: don't propagate the not-sleepy state
author Nicholas Piggin <npiggin@gmail.com>
Mon, 16 Oct 2023 12:43:03 +0000 (22:43 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Fri, 20 Oct 2023 11:43:34 +0000 (22:43 +1100)
To simplify things, don't propagate the not-sleepy condition back down
the queue. Instead, have the waiters clear their own node->sleepy when
finding the lock owner is not preempted.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Reviewed-by: "Nysal Jan K.A" <nysal@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20231016124305.139923-5-npiggin@gmail.com
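
For orientation, here is a condensed sketch of the propagation path as it looks after this patch. It is pieced together from the hunks below rather than copied verbatim from the tree, and the surrounding queueing and yield logic is elided, so read it as an approximation of the resulting behaviour rather than the exact upstream code:

    /*
     * Head-of-queue waiter: mark the next waiter sleepy only while the lock
     * owner's vCPU is preempted. Nothing writes sleepy==false back down the
     * queue any more.
     */
    static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
    {
            struct qnode *next;

            if (!paravirt || !pv_yield_propagate_owner)
                    return;

            next = READ_ONCE(node->next);
            if (!next || next->sleepy)
                    return;

            if (vcpu_is_preempted(get_owner_cpu(val)))
                    next->sleepy = 1;
    }

    /* Queued waiter, inside yield_to_prev(), simplified: */
    if (node->sleepy) {
            u32 val = READ_ONCE(lock->val);

            if (val & _Q_LOCKED_VAL) {
                    /* yield to the lock owner if it is preempted
                     * (unchanged code, not shown in this diff) */
            }
            node->sleepy = false;   /* each waiter clears its own flag */
    }
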
arch/powerpc/lib/qspinlock.c

index 0932d24a6b07331173ff71d44a678f20257d5669..6bb627e90a321ee8dae7d0cdb84e9947d518f9dd 100644 (file)
@@ -350,7 +350,7 @@ static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u
        return __yield_to_locked_owner(lock, val, paravirt, mustq);
 }
 
-static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool *set_sleepy, bool paravirt)
+static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
 {
        struct qnode *next;
        int owner;
@@ -359,18 +359,17 @@ static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool *
                return;
        if (!pv_yield_propagate_owner)
                return;
-       if (*set_sleepy)
-               return;
 
        next = READ_ONCE(node->next);
        if (!next)
                return;
 
+       if (next->sleepy)
+               return;
+
        owner = get_owner_cpu(val);
-       if (vcpu_is_preempted(owner)) {
+       if (vcpu_is_preempted(owner))
                next->sleepy = 1;
-               *set_sleepy = true;
-       }
 }
 
 /* Called inside spin_begin() */
@@ -385,12 +384,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
        if (!pv_yield_propagate_owner)
                goto yield_prev;
 
-       if (!READ_ONCE(node->sleepy)) {
-               /* Propagate back sleepy==false */
-               if (node->next && node->next->sleepy)
-                       node->next->sleepy = 0;
-               goto yield_prev;
-       } else {
+       if (node->sleepy) {
                u32 val = READ_ONCE(lock->val);
 
                if (val & _Q_LOCKED_VAL) {
@@ -410,6 +404,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
                        if (preempted)
                                return preempted;
                }
+               node->sleepy = false;
        }
 
 yield_prev:
@@ -533,7 +528,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
        bool sleepy = false;
        bool mustq = false;
        int idx;
-       bool set_sleepy = false;
        int iters = 0;
 
        BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -591,10 +585,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
                spec_barrier();
                spin_end();
 
-               /* Clear out stale propagated sleepy */
-               if (paravirt && pv_yield_propagate_owner && node->sleepy)
-                       node->sleepy = 0;
-
                smp_rmb(); /* acquire barrier for the mcs lock */
 
                /*
@@ -636,7 +626,7 @@ again:
                        }
                }
 
-               propagate_sleepy(node, val, &set_sleepy, paravirt);
+               propagate_sleepy(node, val, paravirt);
                preempted = yield_head_to_locked_owner(lock, val, paravirt);
                if (!maybe_stealers)
                        continue;