return &env->subprog_info[subprog];
}
+/* Summary of a helper/kfunc call site, filled in by bpf_get_call_summary(). */
+struct bpf_call_summary {
+ u8 num_params; /* number of parameters actually used by the callee */
+ bool is_void; /* true if the callee does not produce a return value */
+ bool fastcall; /* true if the call follows the fastcall convention */
+};
+
+/* True if @insn is a call to a BPF helper (BPF_CALL with src_reg == 0,
+ * i.e. the callee is identified by helper ID in the immediate).
+ */
+static inline bool bpf_helper_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == 0;
+}
+
+/* True if @insn is a call to another BPF subprogram
+ * (BPF_CALL with src_reg == BPF_PSEUDO_CALL).
+ */
+static inline bool bpf_pseudo_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == BPF_PSEUDO_CALL;
+}
+
+/* True if @insn is a call to a kernel function (kfunc), i.e.
+ * BPF_CALL with src_reg == BPF_PSEUDO_KFUNC_CALL.
+ */
+static inline bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
+}
+
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
bool bpf_insn_is_cond_jump(u8 code);
bool bpf_is_may_goto_insn(struct bpf_insn *insn);
+void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn);
+bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
+ struct bpf_call_summary *cs);
+
int bpf_stack_liveness_init(struct bpf_verifier_env *env);
void bpf_stack_liveness_free(struct bpf_verifier_env *env);
int bpf_update_live_stack(struct bpf_verifier_env *env);
(poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
-static bool bpf_helper_call(const struct bpf_insn *insn)
-{
- return insn->code == (BPF_JMP | BPF_CALL) &&
- insn->src_reg == 0;
-}
-
-static bool bpf_pseudo_call(const struct bpf_insn *insn)
-{
- return insn->code == (BPF_JMP | BPF_CALL) &&
- insn->src_reg == BPF_PSEUDO_CALL;
-}
-
-static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
-{
- return insn->code == (BPF_JMP | BPF_CALL) &&
- insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
-}
-
struct bpf_map_desc {
struct bpf_map *ptr;
int uid;
return btf_name_by_offset(desc_btf, func->name_off);
}
-static void verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn)
+void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
const struct bpf_insn_cbs cbs = {
.cb_call = disasm_kfunc_name,
bpf_fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt));
verbose(env, "stack=%s before ", env->tmp_str_buf);
verbose(env, "%d: ", idx);
- verbose_insn(env, insn);
+ bpf_verbose_insn(env, insn);
}
/* If there is a history record that some registers gained range at this insn,
}
}
-struct call_summary {
- u8 num_params;
- bool is_void;
- bool fastcall;
-};
-
/* If @call is a kfunc or helper call, fills @cs and returns true,
* otherwise returns false.
*/
-static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
- struct call_summary *cs)
+bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
+ struct bpf_call_summary *cs)
{
struct bpf_kfunc_call_arg_meta meta;
const struct bpf_func_proto *fn;
struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx;
struct bpf_insn *call = &env->prog->insnsi[insn_idx];
u32 clobbered_regs_mask;
- struct call_summary cs;
+ struct bpf_call_summary cs;
u32 expected_regs_mask;
s16 off;
int i;
- if (!get_call_summary(env, call, &cs))
+ if (!bpf_get_call_summary(env, call, &cs))
return;
/* A bitmask specifying which caller saved registers are clobbered
verbose_linfo(env, env->insn_idx, "; ");
env->prev_log_pos = env->log.end_pos;
verbose(env, "%d: ", env->insn_idx);
- verbose_insn(env, insn);
+ bpf_verbose_insn(env, insn);
env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos;
env->prev_log_pos = env->log.end_pos;
}
struct bpf_insn *insn,
struct insn_live_regs *info)
{
- struct call_summary cs;
+ struct bpf_call_summary cs;
u8 class = BPF_CLASS(insn->code);
u8 code = BPF_OP(insn->code);
u8 mode = BPF_MODE(insn->code);
case BPF_CALL:
def = ALL_CALLER_SAVED_REGS;
use = def & ~BIT(BPF_REG_0);
- if (get_call_summary(env, insn, &cs))
+ if (bpf_get_call_summary(env, insn, &cs))
use = GENMASK(cs.num_params, 1);
break;
default:
else
verbose(env, ".");
verbose(env, " ");
- verbose_insn(env, &insns[i]);
+ bpf_verbose_insn(env, &insns[i]);
if (bpf_is_ldimm64(&insns[i]))
i++;
}