bpf: Report rqspinlock deadlocks/timeout to BPF stderr
author Kumar Kartikeya Dwivedi <memxor@gmail.com>
    Thu, 3 Jul 2025 20:48:14 +0000 (13:48 -0700)
committer Alexei Starovoitov <ast@kernel.org>
    Fri, 4 Jul 2025 02:30:07 +0000 (19:30 -0700)
Begin reporting rqspinlock deadlocks and timeouts to the BPF program's
stderr.

Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250703204818.925464-9-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/rqspinlock.c

index 338305c8852cf625e0ae84519bb5f3a4aad9584a..5ab354d55d829566d4e885bca657ed395a2aa6a5 100644
@@ -666,6 +666,27 @@ EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
 
 __bpf_kfunc_start_defs();
 
+static void bpf_prog_report_rqspinlock_violation(const char *str, void *lock, bool irqsave)
+{
+       struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
+       struct bpf_stream_stage ss;
+       struct bpf_prog *prog;
+
+       prog = bpf_prog_find_from_stack();
+       if (!prog)
+               return;
+       bpf_stream_stage(ss, prog, BPF_STDERR, ({
+               bpf_stream_printk(ss, "ERROR: %s for bpf_res_spin_lock%s\n", str, irqsave ? "_irqsave" : "");
+               bpf_stream_printk(ss, "Attempted lock   = 0x%px\n", lock);
+               bpf_stream_printk(ss, "Total held locks = %d\n", rqh->cnt);
+               for (int i = 0; i < min(RES_NR_HELD, rqh->cnt); i++)
+                       bpf_stream_printk(ss, "Held lock[%2d] = 0x%px\n", i, rqh->locks[i]);
+               bpf_stream_dump_stack(ss);
+       }));
+}
+
+#define REPORT_STR(ret) ({ (ret) == -ETIMEDOUT ? "Timeout detected" : "AA or ABBA deadlock detected"; })
+
 __bpf_kfunc int bpf_res_spin_lock(struct bpf_res_spin_lock *lock)
 {
        int ret;
@@ -676,6 +697,7 @@ __bpf_kfunc int bpf_res_spin_lock(struct bpf_res_spin_lock *lock)
        preempt_disable();
        ret = res_spin_lock((rqspinlock_t *)lock);
        if (unlikely(ret)) {
+               bpf_prog_report_rqspinlock_violation(REPORT_STR(ret), lock, false);
                preempt_enable();
                return ret;
        }
@@ -698,6 +720,7 @@ __bpf_kfunc int bpf_res_spin_lock_irqsave(struct bpf_res_spin_lock *lock, unsign
        local_irq_save(flags);
        ret = res_spin_lock((rqspinlock_t *)lock);
        if (unlikely(ret)) {
+               bpf_prog_report_rqspinlock_violation(REPORT_STR(ret), lock, true);
                local_irq_restore(flags);
                preempt_enable();
                return ret;
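
For reference, piecing together the format strings added in
bpf_prog_report_rqspinlock_violation() above, a report pushed to the
program's BPF_STDERR stream would look roughly like the sketch below;
the lock addresses and the trailing stack dump are runtime values and
are shown here only as placeholders:

    ERROR: AA or ABBA deadlock detected for bpf_res_spin_lock
    Attempted lock   = 0x<attempted lock address>
    Total held locks = 1
    Held lock[ 0] = 0x<held lock address>
    <program stack dumped by bpf_stream_dump_stack()>

A timeout on the IRQ-saving variant would instead begin with
"ERROR: Timeout detected for bpf_res_spin_lock_irqsave", following
REPORT_STR() and the "_irqsave" suffix selection in the helper above.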