git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
locking: Copy out qspinlock.c to kernel/bpf/rqspinlock.c
author    Kumar Kartikeya Dwivedi <memxor@gmail.com>
          Sun, 16 Mar 2025 04:05:20 +0000 (21:05 -0700)
committer Alexei Starovoitov <ast@kernel.org>
          Wed, 19 Mar 2025 15:03:04 +0000 (08:03 -0700)
In preparation for introducing a new lock implementation, Resilient
Queued Spin Lock, or rqspinlock, we begin by using the existing
qspinlock.c code as the base. Simply copy the code to a new file and
rename functions and variables from 'queued' to 'resilient_queued'.

Since we place the file in kernel/bpf, the include paths need to be relative.

This helps each subsequent commit clearly show how and where the code
is being changed. The only changes after the literal copy in this
commit are renaming the functions where necessary and renaming qnodes
to rqnodes. Let's also use EXPORT_SYMBOL_GPL for the rqspinlock
slowpath.

Reviewed-by: Barret Rhoden <brho@google.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-5-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/rqspinlock.c [new file with mode: 0644]

diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
new file mode 100644 (file)
index 0000000..762108c
--- /dev/null
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Resilient Queued Spin Lock
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2013-2014,2018 Red Hat, Inc.
+ * (C) Copyright 2015 Intel Corp.
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
+ *
+ * Authors: Waiman Long <longman@redhat.com>
+ *          Peter Zijlstra <peterz@infradead.org>
+ */
+
+#ifndef _GEN_PV_LOCK_SLOWPATH
+
+#include <linux/smp.h>
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <linux/prefetch.h>
+#include <asm/byteorder.h>
+#include <asm/qspinlock.h>
+#include <trace/events/lock.h>
+
+/*
+ * Include queued spinlock definitions and statistics code
+ */
+#include "../locking/qspinlock.h"
+#include "../locking/qspinlock_stat.h"
+
+/*
+ * The basic principle of a queue-based spinlock can best be understood
+ * by studying a classic queue-based spinlock implementation called the
+ * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
+ * Synchronization on Shared-Memory Multiprocessors by Mellor-Crummey and
+ * Scott") is available at
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=206115
+ *
+ * This queued spinlock implementation is based on the MCS lock, however to
+ * make it fit the 4 bytes we assume spinlock_t to be, and preserve its
+ * existing API, we must modify it somehow.
+ *
+ * In particular; where the traditional MCS lock consists of a tail pointer
+ * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
+ * unlock the next pending (next->locked), we compress both these: {tail,
+ * next->locked} into a single u32 value.
+ *
+ * Since a spinlock disables recursion of its own context and there is a limit
+ * to the contexts that can nest; namely: task, softirq, hardirq, nmi. As there
+ * are at most 4 nesting levels, it can be encoded by a 2-bit number. Now
+ * we can encode the tail by combining the 2-bit nesting level with the cpu
+ * number. With one byte for the lock value and 3 bytes for the tail, only a
+ * 32-bit word is now needed. Even though we only need 1 bit for the lock,
+ * we extend it to a full byte to achieve better performance for architectures
+ * that support atomic byte write.
+ *
+ * We also change the first spinner to spin on the lock bit instead of its
+ * node; whereby avoiding the need to carry a node from lock to unlock, and
+ * preserving existing lock API. This also makes the unlock code simpler and
+ * faster.
+ *
+ * N.B. The current implementation only supports architectures that allow
+ *      atomic operations on smaller 8-bit and 16-bit data types.
+ *
+ */
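
To make the word layout described above concrete, the standalone sketch
below packs a CPU number and a 2-bit nesting index into the upper bytes
of a 32-bit value and unpacks them again. All constant and function
names are invented for illustration; the bit positions mirror what the
comment describes (and the NR_CPUS < 16K case in qspinlock_types.h)
rather than reusing the kernel's helpers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout of the 32-bit lock word (NR_CPUS < 16K case):
 *  0- 7: locked byte
 *     8: pending
 * 16-17: tail index (nesting level: task/softirq/hardirq/nmi)
 * 18-31: tail CPU + 1 (0 means "no queue tail")
 */
#define EX_TAIL_IDX_OFFSET	16
#define EX_TAIL_CPU_OFFSET	18

static uint32_t ex_encode_tail(int cpu, int idx)
{
	/* cpu + 1 so that an empty queue is encoded as tail == 0 */
	return ((uint32_t)(cpu + 1) << EX_TAIL_CPU_OFFSET) |
	       ((uint32_t)idx << EX_TAIL_IDX_OFFSET);
}

static void ex_decode_tail(uint32_t tail, int *cpu, int *idx)
{
	*cpu = (int)(tail >> EX_TAIL_CPU_OFFSET) - 1;
	*idx = (tail >> EX_TAIL_IDX_OFFSET) & 0x3;
}

int main(void)
{
	int cpu, idx;
	uint32_t tail = ex_encode_tail(5, 2);	/* CPU 5, hardirq nesting level */

	ex_decode_tail(tail, &cpu, &idx);
	printf("tail=0x%08x -> cpu=%d idx=%d\n", tail, cpu, idx);
	return 0;
}
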
+
+#include "../locking/mcs_spinlock.h"
+
+/*
+ * Per-CPU queue node structures; we can never have more than 4 nested
+ * contexts: task, softirq, hardirq, nmi.
+ *
+ * Exactly fits one 64-byte cacheline on a 64-bit architecture.
+ *
+ * PV doubles the storage and uses the second cacheline for PV state.
+ */
+static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);
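
As a rough, self-contained sketch of why four nodes per CPU suffice and
how a slot is claimed, consider the pattern used later in this file
(idx = node->count++, with a trylock fallback once the slots run out).
The structure and helpers below are simplified stand-ins, not the
kernel's types, and they model a single CPU only.

#include <stdio.h>

#define EX_MAX_NODES 4	/* task, softirq, hardirq, nmi */

struct ex_node {
	int locked;
	struct ex_node *next;
	int count;	/* tracked in slot 0 only, like mcs_spinlock */
};

/* Stands in for one CPU's rqnodes[] array. */
static struct ex_node ex_nodes[EX_MAX_NODES];

static struct ex_node *ex_grab_node(void)
{
	int idx = ex_nodes[0].count++;	/* claim the next nesting level */

	if (idx >= EX_MAX_NODES)
		return NULL;	/* caller must fall back (spin on trylock) */
	return &ex_nodes[idx];
}

static void ex_release_node(void)
{
	ex_nodes[0].count--;	/* contexts unwind in LIFO order */
}

int main(void)
{
	struct ex_node *task = ex_grab_node();	/* e.g. task context */
	struct ex_node *irq  = ex_grab_node();	/* e.g. hardirq on top of it */

	printf("task slot=%td irq slot=%td\n", task - ex_nodes, irq - ex_nodes);
	ex_release_node();
	ex_release_node();
	return 0;
}
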
+
+/*
+ * Generate the native code for resilient_queued_spin_unlock_slowpath(); provide NOPs
+ * for all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
+                                          struct mcs_spinlock *prev) { }
+static __always_inline void __pv_kick_node(struct qspinlock *lock,
+                                          struct mcs_spinlock *node) { }
+static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
+                                                  struct mcs_spinlock *node)
+                                                  { return 0; }
+
+#define pv_enabled()           false
+
+#define pv_init_node           __pv_init_node
+#define pv_wait_node           __pv_wait_node
+#define pv_kick_node           __pv_kick_node
+#define pv_wait_head_or_lock   __pv_wait_head_or_lock
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define resilient_queued_spin_lock_slowpath    native_resilient_queued_spin_lock_slowpath
+#endif
+
+#endif /* _GEN_PV_LOCK_SLOWPATH */
+
+/**
+ * resilient_queued_spin_lock_slowpath - acquire the queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ * @val: Current value of the queued spinlock 32-bit word
+ *
+ * (queue tail, pending bit, lock value)
+ *
+ *              fast     :    slow                                  :    unlock
+ *                       :                                          :
+ * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
+ *                       :       | ^--------.------.             /  :
+ *                       :       v           \      \            |  :
+ * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
+ *                       :       | ^--'              |           |  :
+ *                       :       v                   |           |  :
+ * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
+ *   queue               :       | ^--'                          |  :
+ *                       :       v                               |  :
+ * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
+ *   queue               :         ^--'                             :
+ */
+void __lockfunc resilient_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+       struct mcs_spinlock *prev, *next, *node;
+       u32 old, tail;
+       int idx;
+
+       BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+
+       if (pv_enabled())
+               goto pv_queue;
+
+       if (virt_spin_lock(lock))
+               return;
+
+       /*
+        * Wait for in-progress pending->locked hand-overs with a bounded
+        * number of spins so that we guarantee forward progress.
+        *
+        * 0,1,0 -> 0,0,1
+        */
+       if (val == _Q_PENDING_VAL) {
+               int cnt = _Q_PENDING_LOOPS;
+               val = atomic_cond_read_relaxed(&lock->val,
+                                              (VAL != _Q_PENDING_VAL) || !cnt--);
+       }
+
+       /*
+        * If we observe any contention; queue.
+        */
+       if (val & ~_Q_LOCKED_MASK)
+               goto queue;
+
+       /*
+        * trylock || pending
+        *
+        * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
+        */
+       val = queued_fetch_set_pending_acquire(lock);
+
+       /*
+        * If we observe contention, there is a concurrent locker.
+        *
+        * Undo and queue; our setting of PENDING might have made the
+        * n,0,0 -> 0,0,0 transition fail and it will now be waiting
+        * on @next to become !NULL.
+        */
+       if (unlikely(val & ~_Q_LOCKED_MASK)) {
+
+               /* Undo PENDING if we set it. */
+               if (!(val & _Q_PENDING_MASK))
+                       clear_pending(lock);
+
+               goto queue;
+       }
+
+       /*
+        * We're pending, wait for the owner to go away.
+        *
+        * 0,1,1 -> *,1,0
+        *
+        * this wait loop must be a load-acquire such that we match the
+        * store-release that clears the locked bit and create lock
+        * sequentiality; this is because not all
+        * clear_pending_set_locked() implementations imply full
+        * barriers.
+        */
+       if (val & _Q_LOCKED_MASK)
+               smp_cond_load_acquire(&lock->locked, !VAL);
+
+       /*
+        * take ownership and clear the pending bit.
+        *
+        * 0,1,0 -> 0,0,1
+        */
+       clear_pending_set_locked(lock);
+       lockevent_inc(lock_pending);
+       return;
+
+       /*
+        * End of pending bit optimistic spinning and beginning of MCS
+        * queuing.
+        */
+queue:
+       lockevent_inc(lock_slowpath);
+pv_queue:
+       node = this_cpu_ptr(&rqnodes[0].mcs);
+       idx = node->count++;
+       tail = encode_tail(smp_processor_id(), idx);
+
+       trace_contention_begin(lock, LCB_F_SPIN);
+
+       /*
+        * 4 nodes are allocated based on the assumption that there will
+        * not be nested NMIs taking spinlocks. That may not be true in
+        * some architectures even though the chance of needing more than
+        * 4 nodes will still be extremely unlikely. When that happens,
+        * we fall back to spinning on the lock directly without using
+        * any MCS node. This is not the most elegant solution, but is
+        * simple enough.
+        */
+       if (unlikely(idx >= _Q_MAX_NODES)) {
+               lockevent_inc(lock_no_node);
+               while (!queued_spin_trylock(lock))
+                       cpu_relax();
+               goto release;
+       }
+
+       node = grab_mcs_node(node, idx);
+
+       /*
+        * Keep counts of non-zero index values:
+        */
+       lockevent_cond_inc(lock_use_node2 + idx - 1, idx);
+
+       /*
+        * Ensure that we increment the head node->count before initialising
+        * the actual node. If the compiler is kind enough to reorder these
+        * stores, then an IRQ could overwrite our assignments.
+        */
+       barrier();
+
+       node->locked = 0;
+       node->next = NULL;
+       pv_init_node(node);
+
+       /*
+        * We touched a (possibly) cold cacheline in the per-cpu queue node;
+        * attempt the trylock once more in the hope someone let go while we
+        * weren't watching.
+        */
+       if (queued_spin_trylock(lock))
+               goto release;
+
+       /*
+        * Ensure that the initialisation of @node is complete before we
+        * publish the updated tail via xchg_tail() and potentially link
+        * @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
+        */
+       smp_wmb();
+
+       /*
+        * Publish the updated tail.
+        * We have already touched the queueing cacheline; don't bother with
+        * pending stuff.
+        *
+        * p,*,* -> n,*,*
+        */
+       old = xchg_tail(lock, tail);
+       next = NULL;
+
+       /*
+        * if there was a previous node; link it and wait until reaching the
+        * head of the waitqueue.
+        */
+       if (old & _Q_TAIL_MASK) {
+               prev = decode_tail(old, rqnodes);
+
+               /* Link @node into the waitqueue. */
+               WRITE_ONCE(prev->next, node);
+
+               pv_wait_node(node, prev);
+               arch_mcs_spin_lock_contended(&node->locked);
+
+               /*
+                * While waiting for the MCS lock, the next pointer may have
+                * been set by another lock waiter. We optimistically load
+                * the next pointer & prefetch the cacheline for writing
+                * to reduce latency in the upcoming MCS unlock operation.
+                */
+               next = READ_ONCE(node->next);
+               if (next)
+                       prefetchw(next);
+       }
+
+       /*
+        * we're at the head of the waitqueue, wait for the owner & pending to
+        * go away.
+        *
+        * *,x,y -> *,0,0
+        *
+        * this wait loop must use a load-acquire such that we match the
+        * store-release that clears the locked bit and create lock
+        * sequentiality; this is because the set_locked() function below
+        * does not imply a full barrier.
+        *
+        * The PV pv_wait_head_or_lock function, if active, will acquire
+        * the lock and return a non-zero value. So we have to skip the
+        * atomic_cond_read_acquire() call. As the next PV queue head hasn't
+        * been designated yet, there is no way for the locked value to become
+        * _Q_SLOW_VAL. So both the set_locked() and the
+        * atomic_cmpxchg_relaxed() calls will be safe.
+        *
+        * If PV isn't active, 0 will be returned instead.
+        *
+        */
+       if ((val = pv_wait_head_or_lock(lock, node)))
+               goto locked;
+
+       val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
+
+locked:
+       /*
+        * claim the lock:
+        *
+        * n,0,0 -> 0,0,1 : lock, uncontended
+        * *,*,0 -> *,*,1 : lock, contended
+        *
+        * If the queue head is the only one in the queue (lock value == tail)
+        * and nobody is pending, clear the tail code and grab the lock.
+        * Otherwise, we only need to grab the lock.
+        */
+
+       /*
+        * In the PV case we might already have _Q_LOCKED_VAL set, because
+        * of lock stealing; therefore we must also allow:
+        *
+        * n,0,1 -> 0,0,1
+        *
+        * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
+        *       above wait condition, therefore any concurrent setting of
+        *       PENDING will make the uncontended transition fail.
+        */
+       if ((val & _Q_TAIL_MASK) == tail) {
+               if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
+                       goto release; /* No contention */
+       }
+
+       /*
+        * Either somebody is queued behind us or _Q_PENDING_VAL got set
+        * which will then detect the remaining tail and queue behind us
+        * ensuring we'll see a @next.
+        */
+       set_locked(lock);
+
+       /*
+        * contended path; wait for next if not observed yet, release.
+        */
+       if (!next)
+               next = smp_cond_load_relaxed(&node->next, (VAL));
+
+       arch_mcs_spin_unlock_contended(&next->locked);
+       pv_kick_node(lock, next);
+
+release:
+       trace_contention_end(lock, 0);
+
+       /*
+        * release the node
+        */
+       __this_cpu_dec(rqnodes[0].mcs.count);
+}
+EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
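
For context, the exported slowpath above is the kind of function a
queued_spin_lock()-style fast path falls back to: a single
try-cmpxchg from 0 to the locked value, taking the slow path on any
observed contention. The wrapper below is only an illustrative sketch
in that style; the name is made up, and this commit exports the
slowpath without adding such a wrapper.

/*
 * Illustrative only: a fast path in the style of queued_spin_lock(),
 * falling back to the slowpath exported above. Hypothetical name, not
 * part of this commit.
 */
static __always_inline void example_res_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	/* Uncontended: (0,0,0) -> (0,0,1) with acquire ordering. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* Any other observed value means contention; queue up. */
	resilient_queued_spin_lock_slowpath(lock, val);
}
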
+
+/*
+ * Generate the paravirt code for resilient_queued_spin_unlock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef  pv_enabled
+#define pv_enabled()   true
+
+#undef pv_init_node
+#undef pv_wait_node
+#undef pv_kick_node
+#undef pv_wait_head_or_lock
+
+#undef  resilient_queued_spin_lock_slowpath
+#define resilient_queued_spin_lock_slowpath    __pv_resilient_queued_spin_lock_slowpath
+
+#include "../locking/qspinlock_paravirt.h"
+#include "rqspinlock.c"
+
+bool nopvspin;
+static __init int parse_nopvspin(char *arg)
+{
+       nopvspin = true;
+       return 0;
+}
+early_param("nopvspin", parse_nopvspin);
+#endif
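
The #include "rqspinlock.c" above is the same trick qspinlock.c uses:
the file re-includes itself with _GEN_PV_LOCK_SLOWPATH defined, so the
slowpath body is compiled a second time with the paravirt callbacks and
the __pv_-prefixed symbol name substituted in. Below is a minimal,
standalone sketch of that self-inclusion pattern; every name in it is
invented for illustration, and it assumes the file is saved as twice.c
so the self-#include resolves.

/* twice.c - compile one function body twice with different hooks. */
#ifndef SECOND_PASS

#include <stdio.h>

static void native_hook(void) { }	/* NOP, like the __pv_* stubs */

#define hook		native_hook
#define slowpath	native_slowpath

#endif /* SECOND_PASS */

void slowpath(void)
{
	hook();			/* NOP on the first pass, pv_hook() on the second */
	puts("common slowpath body");
}

#ifndef SECOND_PASS
#define SECOND_PASS

static void pv_hook(void) { puts("paravirt hook"); }

#undef  hook
#undef  slowpath
#define hook		pv_hook
#define slowpath	pv_slowpath

#include "twice.c"		/* second pass: emits pv_slowpath() */

int main(void)
{
	native_slowpath();	/* prints only the body */
	pv_slowpath();		/* prints the hook, then the body */
	return 0;
}
#endif /* SECOND_PASS */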