selftests/bpf: Add testcases for BPF_ADD and BPF_SUB
author     Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
           Mon, 23 Jun 2025 04:03:57 +0000 (00:03 -0400)
committer  Alexei Starovoitov <ast@kernel.org>
           Wed, 25 Jun 2025 01:48:34 +0000 (18:48 -0700)
The previous commit improves the precision of scalar(32)_min_max_add
and scalar(32)_min_max_sub. The improvement in precision occurs in
cases where all outcomes overflow or underflow, respectively.

This commit adds selftests that exercise those cases.

This commit also adds selftests for cases where the output register
state bounds for u(32)_min/u(32)_max are conservatively set to unbounded
(when there is partial overflow or underflow).
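
The underlying rule, as a minimal C sketch (illustrative only:
u64_bounds_add() is a hypothetical name, and __builtin_add_overflow()
stands in for the kernel's check_add_overflow(); this is not the
verifier's exact code): when both the min-sum and the max-sum wrap,
every concrete outcome wraps, so the wrapped interval is still sound;
when only the max-sum wraps, the outcomes straddle the wrap-around
point and the bounds must be dropped to unbounded.

    #include <stdbool.h>
    #include <stdint.h>

    /* Bound tracking for dst += src over unsigned 64-bit intervals. */
    static void u64_bounds_add(uint64_t *dst_umin, uint64_t *dst_umax,
                               uint64_t src_umin, uint64_t src_umax)
    {
            bool min_ovf = __builtin_add_overflow(*dst_umin, src_umin, dst_umin);
            bool max_ovf = __builtin_add_overflow(*dst_umax, src_umax, dst_umax);

            /* Partial overflow: results wrap for some inputs but not
             * others, so no single interval covers them all.
             */
            if (min_ovf != max_ovf) {
                    *dst_umin = 0;
                    *dst_umax = UINT64_MAX;
            }
    }

Subtraction is symmetric: the result underflows for every input exactly
when dst_umax - src_umin underflows, i.e. when both endpoint
differences wrap.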

Signed-off-by: Harishankar Vishwanathan <harishankar.vishwanathan@gmail.com>
Co-developed-by: Matan Shachnai <m.shachnai@rutgers.edu>
Signed-off-by: Matan Shachnai <m.shachnai@rutgers.edu>
Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250623040359.343235-3-harishankar.vishwanathan@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/progs/verifier_bounds.c

index 30e16153fdf146c6a716a8f6f16bbc824067c61a..e52a24e15b34ab35dfdb3e129305885917a7c66d 100644 (file)
@@ -1371,4 +1371,165 @@ __naked void mult_sign_ovf(void)
          __imm(bpf_skb_store_bytes)
        : __clobber_all);
 }
+
+SEC("socket")
+__description("64-bit addition, all outcomes overflow")
+__success __log_level(2)
+__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)")
+__retval(0)
+__naked void add64_full_overflow(void)
+{
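+       /* bpf_get_prandom_u32()'s return value is tracked as an unknown
+        * scalar, so after the OR, r3 is in [0xa000000000000000,
+        * 0xffffffffffffffff]. Both endpoints of r3 + r3 wrap mod 2^64,
+        * to 0x4000000000000000 and 0xfffffffffffffffe, so the wrapped
+        * bounds stay precise.
+        */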
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "r4 = r0;"
+       "r3 = 0xa000000000000000 ll;"
+       "r3 |= r4;"
+       "r3 += r3;"
+       "r0 = 0;"
+       "exit"
+       :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit addition, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()")
+__retval(0)
+__naked void add64_partial_overflow(void)
+{
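+       /* r3 = 2 | unknown gives r3 in [2, 0xffffffffffffffff]. The min
+        * sum 2 + 2 = 4 does not wrap but the max sum does, so only some
+        * outcomes overflow and r3 must become unbounded.
+        */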
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "r4 = r0;"
+       "r3 = 2;"
+       "r3 |= r4;"
+       "r3 += r3;"
+       "r0 = 0;"
+       "exit"
+       :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition overflow, all outcomes overflow")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_full_overflow(void)
+{
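+       /* w3 = 0xa0000000 | unknown gives w3 in [0xa0000000, 0xffffffff].
+        * Both endpoints of w3 + w3 wrap mod 2^32, to 0x40000000 and
+        * 0xfffffffe, so the wrapped 32-bit bounds remain precise.
+        */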
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "w4 = w0;"
+       "w3 = 0xa0000000;"
+       "w3 |= w4;"
+       "w3 += w3;"
+       "r0 = 0;"
+       "exit"
+       :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_partial_overflow(void)
+{
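+       /* w3 in [2, 0xffffffff]: the min sum 4 fits in 32 bits but the
+        * max sum wraps, so the u32 bounds are reset to [0, 0xffffffff].
+        */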
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "w4 = w0;"
+       "w3 = 2;"
+       "w3 |= w4;"
+       "w3 += w3;"
+       "r0 = 0;"
+       "exit"
+       :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtraction, all outcomes underflow")
+__success __log_level(2)
+__msg("6: (1f) r3 -= r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)")
+__retval(0)
+__naked void sub64_full_overflow(void)
+{
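+       /* r1 in [0x8000000000000000, 0xffffffffffffffff] and r3 = 0, so
+        * every outcome of r3 - r1 wraps mod 2^64: 0 - umax = 1 and
+        * 0 - umin = 0x8000000000000000, giving the precise wrapped
+        * bounds [1, 0x8000000000000000].
+        */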
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "r1 = r0;"
+       "r2 = 0x8000000000000000 ll;"
+       "r1 |= r2;"
+       "r3 = 0;"
+       "r3 -= r1;"
+       "r0 = 0;"
+       "exit"
+       :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtration, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()")
+__retval(0)
+__naked void sub64_partial_overflow(void)
+{
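+       /* r3 is an unknown scalar in [0, 0xffffffffffffffff]. r3 - 1
+        * wraps only for r3 = 0 (umin underflows, umax does not), so the
+        * result must become unbounded.
+        */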
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "r3 = r0;"
+       "r2 = 1;"
+       "r3 -= r2;"
+       "r0 = 0;"
+       "exit"
+       :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtraction overflow, all outcomes underflow")
+__success __log_level(2)
+__msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_full_overflow(void)
+{
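+       /* w1 in [0x80000000, 0xffffffff] and w3 = 0, so every outcome of
+        * w3 - w1 wraps mod 2^32: 0 - umax = 1 and 0 - umin = 0x80000000,
+        * giving the precise wrapped bounds [1, 0x80000000].
+        */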
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "w1 = w0;"
+       "w2 = 0x80000000;"
+       "w1 |= w2;"
+       "w3 = 0;"
+       "w3 -= w1;"
+       "r0 = 0;"
+       "exit"
+       :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtration, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_partial_overflow(void)
+{
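+       /* w3 in [0, 0xffffffff]: only w3 = 0 wraps on w3 - 1 (partial
+        * underflow), so the u32 bounds are reset to [0, 0xffffffff].
+        */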
+       asm volatile (
+       "call %[bpf_get_prandom_u32];"
+       "w3 = w0;"
+       "w2 = 1;"
+       "w3 -= w2;"
+       "r0 = 0;"
+       "exit"
+       :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";