bpf: Introduce rqspinlock kfuncs
Author:     Kumar Kartikeya Dwivedi <memxor@gmail.com>
AuthorDate: Sun, 16 Mar 2025 04:05:38 +0000 (21:05 -0700)
Commit:     Alexei Starovoitov <ast@kernel.org>
CommitDate: Wed, 19 Mar 2025 15:03:06 +0000 (08:03 -0700)
Introduce four new kfuncs, bpf_res_spin_lock and bpf_res_spin_unlock, and
their irqsave/irqrestore variants, which wrap the rqspinlock APIs.
bpf_res_spin_lock returns a conditional result: NULL is returned when lock
acquisition succeeds, and a non-NULL pointer upon failure. On failure, the
memory pointed to by the returned pointer can be dereferenced after the
NULL check to obtain the error code.
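
For illustration, below is a minimal sketch of how a BPF program might use
these kfuncs. The map layout, element struct, section name, and program
logic are assumptions for the example and are not part of this patch;
loading such a program also depends on the verifier-side support added
later in the series.

  /* Hypothetical usage sketch, not part of this patch. */
  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>

  /* kfuncs added by this patch, declared for use from BPF. */
  extern int bpf_res_spin_lock(struct bpf_res_spin_lock *lock) __ksym;
  extern void bpf_res_spin_unlock(struct bpf_res_spin_lock *lock) __ksym;

  /* Assumed map value type embedding the new lock. */
  struct elem {
          struct bpf_res_spin_lock lock;
          int counter;
  };

  struct {
          __uint(type, BPF_MAP_TYPE_ARRAY);
          __uint(max_entries, 1);
          __type(key, int);
          __type(value, struct elem);
  } arr SEC(".maps");

  SEC("tc")
  int use_res_spin_lock(struct __sk_buff *ctx)
  {
          struct elem *e;
          int key = 0;

          e = bpf_map_lookup_elem(&arr, &key);
          if (!e)
                  return 0;
          /* A non-zero (non-NULL) result means acquisition failed, e.g. a
           * deadlock or timeout was detected; bail out without touching
           * the protected data.
           */
          if (bpf_res_spin_lock(&e->lock))
                  return 0;
          e->counter++;
          bpf_res_spin_unlock(&e->lock);
          return 0;
  }

  char LICENSE[] SEC("license") = "GPL";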

Instead of reusing the old bpf_spin_lock type, introduce a new type with
the same layout and alignment, but a different name, to avoid type
confusion.

Preemption is disabled upon successful lock acquisition; however, IRQs
are not. Special kfuncs can be introduced later to allow disabling IRQs
when taking a spin lock. Resilient locks are safe against AA deadlocks,
hence leaving IRQs enabled for now does not allow kernel safety to be
violated.

The __irq_flag annotation is used to accept IRQ flags for the IRQ variants,
with the same semantics as the existing bpf_local_irq_{save,restore} kfuncs.
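
As a hedged sketch (the extern declarations and helper below are
assumptions, reusing the struct elem from the earlier example), the IRQ
variants take a pointer to a stack slot for the flags, just like
bpf_local_irq_save/bpf_local_irq_restore:

  extern int bpf_res_spin_lock_irqsave(struct bpf_res_spin_lock *lock,
                                       unsigned long *flags) __ksym;
  extern void bpf_res_spin_unlock_irqrestore(struct bpf_res_spin_lock *lock,
                                             unsigned long *flags) __ksym;

  static __always_inline void bump_counter_irqsafe(struct elem *e)
  {
          unsigned long flags;

          /* A non-zero return means the lock was not acquired; IRQ state
           * and preemption have already been restored by the kfunc in
           * that case.
           */
          if (bpf_res_spin_lock_irqsave(&e->lock, &flags))
                  return;
          e->counter++;
          bpf_res_spin_unlock_irqrestore(&e->lock, &flags);
  }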

These kfuncs will require additional verifier-side support in subsequent
commits, to allow programs to hold multiple locks at the same time.
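
Once that support lands, a program holding two locks has to release the
first lock when the second acquisition fails; a hypothetical unwinding
pattern (again reusing struct elem from the sketch above):

  static __always_inline int lock_pair(struct elem *a, struct elem *b)
  {
          int ret;

          ret = bpf_res_spin_lock(&a->lock);
          if (ret)
                  return ret;
          ret = bpf_res_spin_lock(&b->lock);
          if (ret) {
                  /* The second acquisition failed (e.g. an ABBA deadlock
                   * was detected or the attempt timed out); drop the first
                   * lock before propagating the error.
                   */
                  bpf_res_spin_unlock(&a->lock);
                  return ret;
          }
          return 0;
  }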

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-23-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/asm-generic/rqspinlock.h
include/linux/bpf.h
kernel/bpf/rqspinlock.c

diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index 23abd0b8d0f9f5ccd31e28e5c4eb7dc8e78f9038..6d4244d643df32e78dcbc8561534c208fa9780c0 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -23,6 +23,13 @@ struct rqspinlock {
        };
 };
 
+/* Even though this is same as struct rqspinlock, we need to emit a distinct
+ * type in BTF for BPF programs.
+ */
+struct bpf_res_spin_lock {
+       u32 val;
+};
+
 struct qspinlock;
 #ifdef CONFIG_QUEUED_SPINLOCKS
 typedef struct qspinlock rqspinlock_t;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 973a88d9b52bc9786929a03456d2726d27e93700..e3928fb19c2c9afcf1174907b04e38c10ded1202 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -30,6 +30,7 @@
 #include <linux/static_call.h>
 #include <linux/memcontrol.h>
 #include <linux/cfi.h>
+#include <asm/rqspinlock.h>
 
 struct bpf_verifier_env;
 struct bpf_verifier_log;
diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index ed21ee0100633ec7f5bb9a5ec6ae053cd4e30473..b896c4a75a5c9bde1adc9a184b49532ee1916065 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -15,6 +15,8 @@
 
 #include <linux/smp.h>
 #include <linux/bug.h>
+#include <linux/bpf.h>
+#include <linux/err.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
@@ -661,3 +663,75 @@ err_release_entry:
 EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
 
 #endif /* CONFIG_QUEUED_SPINLOCKS */
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_res_spin_lock(struct bpf_res_spin_lock *lock)
+{
+       int ret;
+
+       BUILD_BUG_ON(sizeof(rqspinlock_t) != sizeof(struct bpf_res_spin_lock));
+       BUILD_BUG_ON(__alignof__(rqspinlock_t) != __alignof__(struct bpf_res_spin_lock));
+
+       preempt_disable();
+       ret = res_spin_lock((rqspinlock_t *)lock);
+       if (unlikely(ret)) {
+               preempt_enable();
+               return ret;
+       }
+       return 0;
+}
+
+__bpf_kfunc void bpf_res_spin_unlock(struct bpf_res_spin_lock *lock)
+{
+       res_spin_unlock((rqspinlock_t *)lock);
+       preempt_enable();
+}
+
+__bpf_kfunc int bpf_res_spin_lock_irqsave(struct bpf_res_spin_lock *lock, unsigned long *flags__irq_flag)
+{
+       u64 *ptr = (u64 *)flags__irq_flag;
+       unsigned long flags;
+       int ret;
+
+       preempt_disable();
+       local_irq_save(flags);
+       ret = res_spin_lock((rqspinlock_t *)lock);
+       if (unlikely(ret)) {
+               local_irq_restore(flags);
+               preempt_enable();
+               return ret;
+       }
+       *ptr = flags;
+       return 0;
+}
+
+__bpf_kfunc void bpf_res_spin_unlock_irqrestore(struct bpf_res_spin_lock *lock, unsigned long *flags__irq_flag)
+{
+       u64 *ptr = (u64 *)flags__irq_flag;
+       unsigned long flags = *ptr;
+
+       res_spin_unlock((rqspinlock_t *)lock);
+       local_irq_restore(flags);
+       preempt_enable();
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(rqspinlock_kfunc_ids)
+BTF_ID_FLAGS(func, bpf_res_spin_lock, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_res_spin_unlock)
+BTF_ID_FLAGS(func, bpf_res_spin_lock_irqsave, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_res_spin_unlock_irqrestore)
+BTF_KFUNCS_END(rqspinlock_kfunc_ids)
+
+static const struct btf_kfunc_id_set rqspinlock_kfunc_set = {
+       .owner = THIS_MODULE,
+       .set = &rqspinlock_kfunc_ids,
+};
+
+static __init int rqspinlock_register_kfuncs(void)
+{
+       return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &rqspinlock_kfunc_set);
+}
+late_initcall(rqspinlock_register_kfuncs);