]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
bpf: Add helper to detect indirect jump targets
authorXu Kuohai <xukuohai@huawei.com>
Thu, 16 Apr 2026 06:43:39 +0000 (06:43 +0000)
committerAlexei Starovoitov <ast@kernel.org>
Thu, 16 Apr 2026 14:03:40 +0000 (07:03 -0700)
Introduce helper bpf_insn_is_indirect_target to check whether a BPF
instruction is an indirect jump target.

Since the verifier knows which instructions are indirect jump targets,
add a new flag indirect_target to struct bpf_insn_aux_data to mark
them. The verifier sets this flag when verifying an indirect jump target
instruction, and the helper checks the flag to determine whether an
instruction is an indirect jump target.

Reviewed-by: Anton Protopopov <a.s.protopopov@gmail.com> #v8
Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com> #v12
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Link: https://lore.kernel.org/r/20260416064341.151802-4-xukuohai@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
include/linux/bpf_verifier.h
kernel/bpf/core.c
kernel/bpf/fixups.c
kernel/bpf/verifier.c

index 0136a108d083768cfdde829e96e1c2ea4bb15104..b4b703c90ca94f2528f04d87a9d429b7c6b70d6e 100644 (file)
@@ -1541,6 +1541,8 @@ bool bpf_has_frame_pointer(unsigned long ip);
 int bpf_jit_charge_modmem(u32 size);
 void bpf_jit_uncharge_modmem(u32 size);
 bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
+bool bpf_insn_is_indirect_target(const struct bpf_verifier_env *env, const struct bpf_prog *prog,
+                                int insn_idx);
 #else
 static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
                                           struct bpf_trampoline *tr,
index 53e8664cb566d6aadbe24c5f109f3de0b3b62311..b148f816f25b08a4cff5bd7a2512b5272f73a35d 100644 (file)
@@ -630,16 +630,17 @@ struct bpf_insn_aux_data {
 
        /* below fields are initialized once */
        unsigned int orig_idx; /* original instruction index */
-       bool jmp_point;
-       bool prune_point;
+       u32 jmp_point:1;
+       u32 prune_point:1;
        /* ensure we check state equivalence and save state checkpoint and
         * this instruction, regardless of any heuristics
         */
-       bool force_checkpoint;
+       u32 force_checkpoint:1;
        /* true if instruction is a call to a helper function that
         * accepts callback function as a parameter.
         */
-       bool calls_callback;
+       u32 calls_callback:1;
+       u32 indirect_target:1; /* if it is an indirect jump target */
        /*
         * CFG strongly connected component this instruction belongs to,
         * zero if it is a singleton SCC.
index 79361aa117575ded7b5c8ee216f74d2d5d26cdfb..8b018ff488750b46b64dfea6fee051d81bcaa5d1 100644 (file)
@@ -1573,6 +1573,15 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bp
        clone->blinded = 1;
        return clone;
 }
+
+bool bpf_insn_is_indirect_target(const struct bpf_verifier_env *env, const struct bpf_prog *prog,
+                                int insn_idx)
+{
+       if (!env)
+               return false;
+       insn_idx += prog->aux->subprog_start;
+       return env->insn_aux_data[insn_idx].indirect_target;
+}
 #endif /* CONFIG_BPF_JIT */
 
 /* Base function for offset calculation. Needs to go into .text section,
index 6c86980cc9e8eb38a88564e438e6f233f9792963..fba9e8c008787547111ae60f4a508d19ea38500c 100644 (file)
@@ -183,6 +183,18 @@ static void adjust_insn_aux_data(struct bpf_verifier_env *env,
                data[i].seen = old_seen;
                data[i].zext_dst = insn_has_def32(insn + i);
        }
+
+       /*
+        * The indirect_target flag of the original instruction was moved to the last of the
+        * new instructions by the above memmove and memset, but the indirect jump target is
+        * actually the first instruction, so move it back. This also matches the behavior
+        * of bpf_insn_array_adjust(), which preserves xlated_off to point to the first new
+        * instruction.
+        */
+       if (data[off + cnt - 1].indirect_target) {
+               data[off].indirect_target = 1;
+               data[off + cnt - 1].indirect_target = 0;
+       }
 }
 
 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
index e804e0da350035158a84b2bbbd79cd4b26863e0c..1e36b9e9127784a012c0d9d552785e4fa41e911e 100644 (file)
@@ -3497,6 +3497,11 @@ static int insn_stack_access_flags(int frameno, int spi)
        return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
 }
 
+static void mark_indirect_target(struct bpf_verifier_env *env, int idx)
+{
+       env->insn_aux_data[idx].indirect_target = true;
+}
+
 #define LR_FRAMENO_BITS        3
 #define LR_SPI_BITS    6
 #define LR_ENTRY_BITS  (LR_SPI_BITS + LR_FRAMENO_BITS + 1)
@@ -17545,12 +17550,14 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
        }
 
        for (i = 0; i < n - 1; i++) {
+               mark_indirect_target(env, env->gotox_tmp_buf->items[i]);
                other_branch = push_stack(env, env->gotox_tmp_buf->items[i],
                                          env->insn_idx, env->cur_state->speculative);
                if (IS_ERR(other_branch))
                        return PTR_ERR(other_branch);
        }
        env->insn_idx = env->gotox_tmp_buf->items[n-1];
+       mark_indirect_target(env, env->insn_idx);
        return INSN_IDX_UPDATED;
 }