git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
bpf: signal error if old liveness is more conservative than new
author: Eduard Zingerman <eddyz87@gmail.com>
Fri, 19 Sep 2025 02:18:41 +0000 (19:18 -0700)
committer: Alexei Starovoitov <ast@kernel.org>
Fri, 19 Sep 2025 16:27:23 +0000 (09:27 -0700)
Unlike the new algorithm, register chain based liveness tracking is
fully path sensitive, and thus should be strictly more accurate.
Validate the new algorithm by signaling an error whenever it considers
a stack slot dead while the old algorithm considers it alive.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250918-callchain-sensitive-liveness-v3-8-c3cd27bacc60@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf_verifier.h
kernel/bpf/verifier.c

index 2e3bdd50e2ba46040d6806a0b6ac18124fcb6c75..dec5da3a2e59dc22ef3cb60407f82267cf5a2c61 100644 (file)
@@ -852,6 +852,7 @@ struct bpf_verifier_env {
        /* array of pointers to bpf_scc_info indexed by SCC id */
        struct bpf_scc_info **scc_info;
        u32 scc_cnt;
+       bool internal_error;
 };
 
 static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
index bb931a144b95324990eda5ce783db8c0463ce3aa..f70e34a38c13145756692fb5955f1d5dc8118219 100644 (file)
@@ -18576,6 +18576,11 @@ static void clean_func_state(struct bpf_verifier_env *env,
 
        for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
                if (!bpf_stack_slot_alive(env, st->frameno, i)) {
+                       if (st->stack[i].spilled_ptr.live & REG_LIVE_READ) {
+                               verifier_bug(env, "incorrect live marks #1 for insn %d frameno %d spi %d\n",
+                                            env->insn_idx, st->frameno, i);
+                               env->internal_error = true;
+                       }
                        __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
                        for (j = 0; j < BPF_REG_SIZE; j++)
                                st->stack[i].slot_type[j] = STACK_INVALID;
@@ -19546,6 +19551,8 @@ skip_inf_loop_check:
                loop = incomplete_read_marks(env, &sl->state);
                if (states_equal(env, &sl->state, cur, loop ? RANGE_WITHIN : NOT_EXACT)) {
 hit:
+                       if (env->internal_error)
+                               return -EFAULT;
                        sl->hit_cnt++;
                        /* reached equivalent register/stack state,
                         * prune the search.
@@ -19660,6 +19667,8 @@ hit:
                        return 1;
                }
 miss:
+               if (env->internal_error)
+                       return -EFAULT;
                /* when new state is not going to be added do not increase miss count.
                 * Otherwise several loop iterations will remove the state
                 * recorded earlier. The goal of these heuristics is to have