selftests/bpf: add reproducer for spurious precision propagation through calls
author    Eduard Zingerman <eddyz87@gmail.com>
          Sat, 7 Mar 2026 00:02:48 +0000 (16:02 -0800)
committer Alexei Starovoitov <ast@kernel.org>
          Sat, 7 Mar 2026 05:50:05 +0000 (21:50 -0800)
Add a test for the scenario described in the previous commit:
an iterator loop with two paths, where one path ties r2/r7
together via a shared scalar id and skips a call, while the
other goes through the call. Precision marks from the linked
registers are spuriously propagated to the call path by
propagate_precision(), so backtracking reaches the call with
r2 unexpectedly marked precise and hits the "backtracking call
unexpected regs" error in backtrack_insn().

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20260306-linked-regs-and-propagate-precision-v1-2-18e859be570d@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/progs/verifier_linked_scalars.c

index 2ef346c827c25b96ae86096323dfbfbd5512d2db..7bf7dbfd237daa332fe9e276cedff6d940f5854e 100644 (file)
@@ -363,4 +363,68 @@ void alu32_negative_offset(void)
        __sink(path[0]);
 }
 
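+/*
+ * Never executed at runtime; referencing the iterator kfuncs from C
+ * here presumably keeps their symbols available for the __imm() uses
+ * in the inline assembly below.
+ */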
+void dummy_calls(void)
+{
+       bpf_iter_num_new(0, 0, 0);
+       bpf_iter_num_next(0);
+       bpf_iter_num_destroy(0);
+}
+
+SEC("socket")
+__success
+__flag(BPF_F_TEST_STATE_FREQ)
+int spurious_precision_marks(void *ctx)
+{
+       struct bpf_iter_num iter;
+
+       asm volatile(
+               "r1 = %[iter];"
+               "r2 = 0;"
+               "r3 = 10;"
+               "call %[bpf_iter_num_new];"
+       "1:"
+               "r1 = %[iter];"
+               "call %[bpf_iter_num_next];"
+               "if r0 == 0 goto 4f;"
+               "r7 = *(u32 *)(r0 + 0);"
+               "r8 = *(u32 *)(r0 + 0);"
+               /* This jump can't be predicted and does not change r7 or r8 state. */
+               "if r7 > r8 goto 2f;"
+               /* The branch explored first ties r2 and r7 together with the same scalar id. */
+               "r2 = r7;"
+               "goto 3f;"
+       "2:"
+               /* The branch explored second does not tie r2 and r7 but goes through a function call. */
+               "call %[bpf_get_prandom_u32];"
+       "3:"
+               /*
+                * A checkpoint.
+                * When the first branch is explored, this injects the
+                * linked registers r2 and r7 into the jump history.
+                * When the second branch is explored, this is a cache
+                * hit point, triggering propagate_precision().
+                */
+               "if r7 <= 42 goto +0;"
+               /*
+                * Mark r7 as precise using a condition that is always
+                * true (r7 was loaded as a u32). When reached via the
+                * second branch, this triggered a bug in backtrack_insn(),
+                * because r2 (tied to r7) was propagated as precise into
+                * the path containing the call.
+                */
+               "if r7 <= 0xffffFFFF goto +0;"
+               "goto 1b;"
+       "4:"
+               "r1 = %[iter];"
+               "call %[bpf_iter_num_destroy];"
+               :
+               : __imm_ptr(iter),
+                 __imm(bpf_iter_num_new),
+                 __imm(bpf_iter_num_next),
+                 __imm(bpf_iter_num_destroy),
+                 __imm(bpf_get_prandom_u32)
+               : __clobber_common, "r7", "r8"
+       );
+
+       return 0;
+}
+
 char _license[] SEC("license") = "GPL";
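
Assuming verifier_linked_scalars is wired into the standard verifier
test runner via RUN_TESTS() in prog_tests/verifier.c, as the existing
verifier_*.c progs are, the new reproducer can be exercised on its own
with something like:

    cd tools/testing/selftests/bpf
    make
    ./test_progs -t verifier_linked_scalars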