bpf: Add range tracking for BPF_NEG
author     Song Liu <song@kernel.org>
           Wed, 25 Jun 2025 16:40:24 +0000 (09:40 -0700)
committer  Alexei Starovoitov <ast@kernel.org>
           Wed, 25 Jun 2025 22:12:17 +0000 (15:12 -0700)
Add range tracking for the BPF_NEG instruction. Without this logic, a
trivial program like the following will fail

    volatile bool found_value_b;
    SEC("lsm.s/socket_connect")
    int BPF_PROG(test_socket_connect)
    {
        if (!found_value_b)
                return -1;
        return 0;
    }

with verifier log:

"At program exit the register R0 has smin=0 smax=4294967295 should have
been in [-4095, 0]".

This is because range information is lost in BPF_NEG:

0: R1=ctx() R10=fp0
; if (!found_value_b) @ xxxx.c:24
0: (18) r1 = 0xffa00000011e7048       ; R1_w=map_value(...)
2: (71) r0 = *(u8 *)(r1 +0)           ; R0_w=scalar(smin32=0,smax=255)
3: (a4) w0 ^= 1                       ; R0_w=scalar(smin32=0,smax=255)
4: (84) w0 = -w0                      ; R0_w=scalar(range info lost)

Note that the log above was manually modified to highlight the relevant
bits.

Fix this by maintaining proper range information with BPF_NEG, so that
the verifier will know:

4: (84) w0 = -w0                      ; R0_w=scalar(smin32=-255,smax=0)

Also update the selftests for the new behavior: they previously relied on
"r0 = -r0" to turn a known scalar into an unknown one, which no longer
works now that BPF_NEG preserves range information, so they take an
unknown scalar from bpf_get_prandom_u32() instead.

Signed-off-by: Song Liu <song@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250625164025.3310203-2-song@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/tnum.h
kernel/bpf/tnum.c
kernel/bpf/verifier.c
tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c
tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c

diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index 3c13240077b87a70eb73559fcb8f17dd41a6da3f..57ed3035cc30932353d95bfc703bdda98edd830b 100644
@@ -40,6 +40,8 @@ struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness);
 struct tnum tnum_add(struct tnum a, struct tnum b);
 /* Subtract two tnums, return @a - @b */
 struct tnum tnum_sub(struct tnum a, struct tnum b);
+/* Neg of a tnum, return 0 - @a */
+struct tnum tnum_neg(struct tnum a);
 /* Bitwise-AND, return @a & @b */
 struct tnum tnum_and(struct tnum a, struct tnum b);
 /* Bitwise-OR, return @a | @b */
diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
index 9dbc31b25e3d08f4424fc8cf298944018acc85a1..fa353c5d550fc998d96de481e21cf68ab8bbf9be 100644
@@ -83,6 +83,11 @@ struct tnum tnum_sub(struct tnum a, struct tnum b)
        return TNUM(dv & ~mu, mu);
 }
 
+struct tnum tnum_neg(struct tnum a)
+{
+       return tnum_sub(TNUM(0, 0), a);
+}
+
 struct tnum tnum_and(struct tnum a, struct tnum b)
 {
        u64 alpha, beta, v;
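
For intuition: tnum_neg() reuses tnum_sub() with a known-zero minuend.
Since 0 has no unknown bits, every uncertain bit in the result comes from
@a alone. The userspace sketch below mirrors the tnum layout and the
tnum_sub() algorithm from kernel/bpf/tnum.c (an illustration under that
assumption, not kernel code) and shows that negating a fully known tnum
stays fully known:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the kernel's tristate number: value holds the known bits,
     * mask flags the unknown ones. */
    struct tnum { uint64_t value; uint64_t mask; };
    #define TNUM(v, m) ((struct tnum){ (v), (m) })

    static struct tnum tnum_sub(struct tnum a, struct tnum b)
    {
        uint64_t dv = a.value - b.value;      /* difference of the known parts */
        uint64_t alpha = dv + a.mask;         /* extreme if a's unknown bits are all 1 */
        uint64_t beta = dv - b.mask;          /* extreme if b's unknown bits are all 1 */
        uint64_t chi = alpha ^ beta;          /* bit positions that can differ */
        uint64_t mu = chi | a.mask | b.mask;  /* all possibly-unknown result bits */

        return TNUM(dv & ~mu, mu);
    }

    static struct tnum tnum_neg(struct tnum a)
    {
        return tnum_sub(TNUM(0, 0), a);       /* -a == 0 - a */
    }

    int main(void)
    {
        struct tnum t = tnum_neg(TNUM(6, 0)); /* negate a fully known 6 */

        /* Prints value=0xfffffffffffffffa mask=0: -6, still fully known. */
        printf("value=%#llx mask=%#llx\n",
               (unsigned long long)t.value, (unsigned long long)t.mask);
        return 0;
    }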
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f403524bd2153845d7a9cc78d43cd671a6c5afea..2ff22ef423480b87ce21eaefb36b18d15dcc45cd 100644
@@ -15182,6 +15182,7 @@ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
        switch (BPF_OP(insn->code)) {
        case BPF_ADD:
        case BPF_SUB:
+       case BPF_NEG:
        case BPF_AND:
        case BPF_XOR:
        case BPF_OR:
@@ -15250,6 +15251,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                scalar_min_max_sub(dst_reg, &src_reg);
                dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
                break;
+       case BPF_NEG:
+               env->fake_reg[0] = *dst_reg;
+               __mark_reg_known(dst_reg, 0);
+               scalar32_min_max_sub(dst_reg, &env->fake_reg[0]);
+               scalar_min_max_sub(dst_reg, &env->fake_reg[0]);
+               dst_reg->var_off = tnum_neg(env->fake_reg[0].var_off);
+               break;
        case BPF_MUL:
                dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
                scalar32_min_max_mul(dst_reg, &src_reg);
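
The BPF_NEG case above snapshots the destination into env->fake_reg[0],
rewrites the destination as a known zero, and lets the existing
subtraction helpers compute 0 - x. On a signed range this flips and
negates the bounds, which is how [0, 255] from the example becomes
[-255, 0]. A hypothetical standalone illustration (neg_range() is not
from the patch, and the kernel helpers additionally widen to unbounded
on signed overflow):

    #include <stdint.h>
    #include <stdio.h>

    struct range { int64_t smin, smax; };

    /* Negation modeled as 0 - x: the new lower bound comes from negating
     * the old upper bound and vice versa. The kernel must also handle the
     * overflow case (negating INT64_MIN), where it gives up on the bounds. */
    static struct range neg_range(struct range r)
    {
        return (struct range){ .smin = -r.smax, .smax = -r.smin };
    }

    int main(void)
    {
        struct range r = neg_range((struct range){ .smin = 0, .smax = 255 });

        printf("[%lld, %lld]\n", (long long)r.smin, (long long)r.smax);
        /* Prints [-255, 0], matching the verifier log after the fix. */
        return 0;
    }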
@@ -15473,7 +15481,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                }
 
                /* check dest operand */
-               err = check_reg_arg(env, insn->dst_reg, DST_OP);
+               if (opcode == BPF_NEG) {
+                       err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
+                       err = err ?: adjust_scalar_min_max_vals(env, insn,
+                                                        &regs[insn->dst_reg],
+                                                        regs[insn->dst_reg]);
+               } else {
+                       err = check_reg_arg(env, insn->dst_reg, DST_OP);
+               }
                if (err)
                        return err;
 
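
With DST_OP_NO_MARK, check_reg_arg() no longer wipes the destination's
state for BPF_NEG, and adjust_scalar_min_max_vals() fills in the negated
range instead. Under that assumption, a program shaped like the
motivating example should now verify; here is a hypothetical
selftest-style sketch (not part of this patch, and assuming the compiler
emits a BPF_NEG for the negation, as in the log above):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("lsm.s/socket_connect")
    int BPF_PROG(neg_keeps_range)
    {
        int x = bpf_get_prandom_u32() & 0xff; /* verifier range: [0, 255] */

        /* With this patch, -x carries [-255, 0], which is inside the
         * [-4095, 0] window required at program exit for this LSM hook. */
        return -x;
    }

    char _license[] SEC("license") = "GPL";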
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c
index c506afbdd93611228faa780b46e61cb606f5e79f..260a6df264e336fbb50fda36056f5b215e0eaeba 100644
@@ -159,13 +159,16 @@ __failure_unpriv
 __naked void deducing_bounds_from_const_10(void)
 {
        asm volatile ("                                 \
+       r6 = r1;                                        \
        r0 = 0;                                         \
        if r0 s<= 0 goto l0_%=;                         \
-l0_%=: /* Marks reg as unknown. */                     \
-       r0 = -r0;                                       \
-       r0 -= r1;                                       \
+l0_%=: /* Marks r0 as unknown. */                      \
+       call %[bpf_get_prandom_u32];                    \
+       r0 -= r6;                                       \
        exit;                                           \
-"      ::: __clobber_all);
+"      :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_all);
 }
 
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c
index fcea9819e359d0d9e2e2b8e3acdc11b7b084af78..af7938ce56cb07a340470ce4629baea3753562ab 100644
@@ -231,6 +231,10 @@ __retval(1)
 __naked void ptr_unknown_vs_unknown_lt(void)
 {
        asm volatile ("                                 \
+       r8 = r1;                                        \
+       call %[bpf_get_prandom_u32];                    \
+       r9 = r0;                                        \
+       r1 = r8;                                        \
        r0 = *(u32*)(r1 + %[__sk_buff_len]);            \
        r1 = 0;                                         \
        *(u64*)(r10 - 8) = r1;                          \
@@ -245,11 +249,11 @@ l1_%=:    call %[bpf_map_lookup_elem];                    \
        r4 = *(u8*)(r0 + 0);                            \
        if r4 == 1 goto l3_%=;                          \
        r1 = 6;                                         \
-       r1 = -r1;                                       \
+       r1 = r9;                                        \
        r1 &= 0x3;                                      \
        goto l4_%=;                                     \
 l3_%=: r1 = 6;                                         \
-       r1 = -r1;                                       \
+       r1 = r9;                                        \
        r1 &= 0x7;                                      \
 l4_%=: r1 += r0;                                       \
        r0 = *(u8*)(r1 + 0);                            \
@@ -259,7 +263,8 @@ l2_%=:      r0 = 1;                                         \
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_array_48b),
          __imm_addr(map_hash_16b),
-         __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+         __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+         __imm(bpf_get_prandom_u32)
        : __clobber_all);
 }
 
@@ -271,6 +276,10 @@ __retval(1)
 __naked void ptr_unknown_vs_unknown_gt(void)
 {
        asm volatile ("                                 \
+       r8 = r1;                                        \
+       call %[bpf_get_prandom_u32];                    \
+       r9 = r0;                                        \
+       r1 = r8;                                        \
        r0 = *(u32*)(r1 + %[__sk_buff_len]);            \
        r1 = 0;                                         \
        *(u64*)(r10 - 8) = r1;                          \
@@ -285,11 +294,11 @@ l1_%=:    call %[bpf_map_lookup_elem];                    \
        r4 = *(u8*)(r0 + 0);                            \
        if r4 == 1 goto l3_%=;                          \
        r1 = 6;                                         \
-       r1 = -r1;                                       \
+       r1 = r9;                                        \
        r1 &= 0x7;                                      \
        goto l4_%=;                                     \
 l3_%=: r1 = 6;                                         \
-       r1 = -r1;                                       \
+       r1 = r9;                                        \
        r1 &= 0x3;                                      \
 l4_%=: r1 += r0;                                       \
        r0 = *(u8*)(r1 + 0);                            \
@@ -299,7 +308,8 @@ l2_%=:      r0 = 1;                                         \
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_array_48b),
          __imm_addr(map_hash_16b),
-         __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+         __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+         __imm(bpf_get_prandom_u32)
        : __clobber_all);
 }