git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
bpf: clean up visit_insn()'s instruction processing
author: Andrii Nakryiko <andrii@kernel.org>
date: Thu, 2 Mar 2023 23:50:04 +0000 (15:50 -0800)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
date: Wed, 10 Jan 2024 16:10:29 +0000 (17:10 +0100)
[ Upstream commit 653ae3a874aca6764a4c1f5a8bf1b072ade0d6f4 ]

Instead of referencing processed instruction repeatedly as insns[t]
throughout entire visit_insn() function, take a local insn pointer and
work with it in a cleaner way.

It makes enhancing this function further a bit easier as well.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230302235015.2044271-7-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Stable-dep-of: 3feb263bb516 ("bpf: handle ldimm64 properly in check_cfg()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/bpf/verifier.c

index d1393e07ab2c98cbb96d87726e66ba72e1aff67d..73d500c51bd863e49edea483b6fef71edd71f138 100644 (file)
@@ -11115,44 +11115,43 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
  */
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
-       struct bpf_insn *insns = env->prog->insnsi;
+       struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
        int ret;
 
-       if (bpf_pseudo_func(insns + t))
+       if (bpf_pseudo_func(insn))
                return visit_func_call_insn(t, insns, env, true);
 
        /* All non-branch instructions have a single fall-through edge. */
-       if (BPF_CLASS(insns[t].code) != BPF_JMP &&
-           BPF_CLASS(insns[t].code) != BPF_JMP32)
+       if (BPF_CLASS(insn->code) != BPF_JMP &&
+           BPF_CLASS(insn->code) != BPF_JMP32)
                return push_insn(t, t + 1, FALLTHROUGH, env, false);
 
-       switch (BPF_OP(insns[t].code)) {
+       switch (BPF_OP(insn->code)) {
        case BPF_EXIT:
                return DONE_EXPLORING;
 
        case BPF_CALL:
-               if (insns[t].imm == BPF_FUNC_timer_set_callback)
+               if (insn->imm == BPF_FUNC_timer_set_callback)
                        /* Mark this call insn as a prune point to trigger
                         * is_state_visited() check before call itself is
                         * processed by __check_func_call(). Otherwise new
                         * async state will be pushed for further exploration.
                         */
                        mark_prune_point(env, t);
-               return visit_func_call_insn(t, insns, env,
-                                           insns[t].src_reg == BPF_PSEUDO_CALL);
+               return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
 
        case BPF_JA:
-               if (BPF_SRC(insns[t].code) != BPF_K)
+               if (BPF_SRC(insn->code) != BPF_K)
                        return -EINVAL;
 
                /* unconditional jump with single edge */
-               ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
+               ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
                                true);
                if (ret)
                        return ret;
 
-               mark_prune_point(env, t + insns[t].off + 1);
-               mark_jmp_point(env, t + insns[t].off + 1);
+               mark_prune_point(env, t + insn->off + 1);
+               mark_jmp_point(env, t + insn->off + 1);
 
                return ret;
 
@@ -11164,7 +11163,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
                if (ret)
                        return ret;
 
-               return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
+               return push_insn(t, t + insn->off + 1, BRANCH, env, true);
        }
 }