bpf: table based bpf_insn_successors()
author    Eduard Zingerman <eddyz87@gmail.com>
          Fri, 19 Sep 2025 02:18:43 +0000 (19:18 -0700)
committer Alexei Starovoitov <ast@kernel.org>
          Fri, 19 Sep 2025 16:27:23 +0000 (09:27 -0700)
Converting bpf_insn_successors() to use a lookup table makes it ~1.5
times faster.
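
The new implementation replaces the branchy can_fallthrough()/can_jump()
helpers with a single 256-entry table indexed by instruction class and
opcode; the core of the lookup (mirroring the code added below):

    opcode_info = &opcode_info_tbl[BPF_CLASS(insn->code) | BPF_OP(insn->code)];
    if (opcode_info->can_fallthrough)
            succ[i++] = idx + insn_sz;
    if (opcode_info->can_jump)
            succ[i++] = idx + bpf_jmp_offset(insn) + 1;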

Also remove unnecessary conditionals:
- `idx + 1 < prog->len` is unnecessary because after check_cfg() all
  jump targets are guaranteed to be within the program;
- `i == 0 || succ[0] != dst` is unnecessary because any client of
  bpf_insn_successors() can handle duplicate edges (see the sketch
  after this list):
  - compute_live_registers()
  - compute_scc()
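
A minimal sketch of such a tolerant consumer (hypothetical;
mark_reachable() stands in for whatever per-edge work a client does):

    u32 succ[2];
    int cnt, i;

    cnt = bpf_insn_successors(prog, insn_idx, succ);
    for (i = 0; i < cnt; i++)
            /* idempotent, so succ[0] == succ[1] is harmless */
            mark_reachable(succ[i]);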

Moving bpf_insn_successors() to liveness.c allows it to be inlined
into liveness.c:__update_stack_liveness().
Such inlining speeds up __update_stack_liveness() by ~40%.
bpf_insn_successors() is used in both verifier.c and liveness.c.
perf shows that the move does not negatively impact its users in
verifier.c, as these run only once, before the main verification
pass, unlike __update_stack_liveness(), which can be triggered
multiple times.
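
Schematically, the hot path looks like this (illustrative sketch only,
not the actual liveness code; propagate_to() is a hypothetical stand-in
for the per-successor work):

    for (insn_idx = 0; insn_idx < prog->len; insn_idx++) {
            cnt = bpf_insn_successors(prog, insn_idx, succ); /* inlined table lookup */
            for (i = 0; i < cnt; i++)
                    propagate_to(succ[i]); /* hypothetical per-successor update */
    }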

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250918-callchain-sensitive-liveness-v3-10-c3cd27bacc60@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf_verifier.h
kernel/bpf/liveness.c
kernel/bpf/verifier.c

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c7515da8500c56d9e6152657a00eb4fd76477856..4c497e839526a40a66c0c9f1054c19763ac9d41a 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -1049,6 +1049,7 @@ void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_st
                      u32 frameno);
 
 struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
+int bpf_jmp_offset(struct bpf_insn *insn);
 int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2]);
 void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
 bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);
diff --git a/kernel/bpf/liveness.c b/kernel/bpf/liveness.c
index 6f9dfaaf6e64faeaee074926be7fcdaa57b22bc3..3c611aba7f52c541408627b780ea664776145e8b 100644
--- a/kernel/bpf/liveness.c
+++ b/kernel/bpf/liveness.c
@@ -433,6 +433,62 @@ static void log_mask_change(struct bpf_verifier_env *env, struct callchain *call
        bpf_log(&env->log, "\n");
 }
 
+int bpf_jmp_offset(struct bpf_insn *insn)
+{
+       u8 code = insn->code;
+
+       if (code == (BPF_JMP32 | BPF_JA))
+               return insn->imm;
+       return insn->off;
+}
+
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for opcode_info_tbl");
+
+inline int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
+{
+       static const struct opcode_info {
+               bool can_jump;
+               bool can_fallthrough;
+       } opcode_info_tbl[256] = {
+               [0 ... 255] = {.can_jump = false, .can_fallthrough = true},
+       #define _J(code, ...) \
+               [BPF_JMP   | code] = __VA_ARGS__, \
+               [BPF_JMP32 | code] = __VA_ARGS__
+
+               _J(BPF_EXIT,  {.can_jump = false, .can_fallthrough = false}),
+               _J(BPF_JA,    {.can_jump = true,  .can_fallthrough = false}),
+               _J(BPF_JEQ,   {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JNE,   {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JLT,   {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JLE,   {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JGT,   {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JGE,   {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JSGT,  {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JSGE,  {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JSLT,  {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JSLE,  {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JCOND, {.can_jump = true,  .can_fallthrough = true}),
+               _J(BPF_JSET,  {.can_jump = true,  .can_fallthrough = true}),
+       #undef _J
+       };
+       struct bpf_insn *insn = &prog->insnsi[idx];
+       const struct opcode_info *opcode_info;
+       int i = 0, insn_sz;
+
+       opcode_info = &opcode_info_tbl[BPF_CLASS(insn->code) | BPF_OP(insn->code)];
+       insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
+       if (opcode_info->can_fallthrough)
+               succ[i++] = idx + insn_sz;
+
+       if (opcode_info->can_jump)
+               succ[i++] = idx + bpf_jmp_offset(insn) + 1;
+
+       return i;
+}
+
+__diag_pop();
+
 static struct func_instance *get_outer_instance(struct bpf_verifier_env *env,
                                                struct func_instance *instance)
 {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e1da2471442b28c52829b7c6d217e0eeb597cc9e..1d4183bc3cd1e928676283c51e1e86a0adcd2b2f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3485,15 +3485,6 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
        return 0;
 }
 
-static int jmp_offset(struct bpf_insn *insn)
-{
-       u8 code = insn->code;
-
-       if (code == (BPF_JMP32 | BPF_JA))
-               return insn->imm;
-       return insn->off;
-}
-
 static int check_subprogs(struct bpf_verifier_env *env)
 {
        int i, subprog_start, subprog_end, off, cur_subprog = 0;
@@ -3520,7 +3511,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
                        goto next;
                if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
                        goto next;
-               off = i + jmp_offset(&insn[i]) + 1;
+               off = i + bpf_jmp_offset(&insn[i]) + 1;
                if (off < subprog_start || off >= subprog_end) {
                        verbose(env, "jump out of range from insn %d to %d\n", i, off);
                        return -EINVAL;
@@ -23944,67 +23935,6 @@ static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr,
        return 0;
 }
 
-static bool can_fallthrough(struct bpf_insn *insn)
-{
-       u8 class = BPF_CLASS(insn->code);
-       u8 opcode = BPF_OP(insn->code);
-
-       if (class != BPF_JMP && class != BPF_JMP32)
-               return true;
-
-       if (opcode == BPF_EXIT || opcode == BPF_JA)
-               return false;
-
-       return true;
-}
-
-static bool can_jump(struct bpf_insn *insn)
-{
-       u8 class = BPF_CLASS(insn->code);
-       u8 opcode = BPF_OP(insn->code);
-
-       if (class != BPF_JMP && class != BPF_JMP32)
-               return false;
-
-       switch (opcode) {
-       case BPF_JA:
-       case BPF_JEQ:
-       case BPF_JNE:
-       case BPF_JLT:
-       case BPF_JLE:
-       case BPF_JGT:
-       case BPF_JGE:
-       case BPF_JSGT:
-       case BPF_JSGE:
-       case BPF_JSLT:
-       case BPF_JSLE:
-       case BPF_JCOND:
-       case BPF_JSET:
-               return true;
-       }
-
-       return false;
-}
-
-int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
-{
-       struct bpf_insn *insn = &prog->insnsi[idx];
-       int i = 0, insn_sz;
-       u32 dst;
-
-       insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
-       if (can_fallthrough(insn) && idx + 1 < prog->len)
-               succ[i++] = idx + insn_sz;
-
-       if (can_jump(insn)) {
-               dst = idx + jmp_offset(insn) + 1;
-               if (i == 0 || succ[0] != dst)
-                       succ[i++] = dst;
-       }
-
-       return i;
-}
-
 /* Each field is a register bitmask */
 struct insn_live_regs {
        u16 use;        /* registers read by instruction */