struct {
u32 map_index; /* index into used_maps[] */
u32 map_off; /* offset from value base address */
- struct bpf_iarray *jt; /* jump table for gotox instruction */
};
struct {
enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
/* remember the offset of node field within type to rewrite */
u64 insert_off;
};
+ struct bpf_iarray *jt; /* jump table for gotox or bpf_tail_call instruction */
struct btf_struct_meta *kptr_struct_meta;
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u32 postorder_start; /* The idx to the env->cfg.insn_postorder */
+ u32 exit_idx; /* Index of one of the BPF_EXIT instructions in this subprogram */
u16 stack_depth; /* max. stack depth used by this function */
u16 stack_extra;
/* offsets in range [stack_depth .. fastcall_stack_off)
 * are used for bpf_fastcall spills and fills
 */
bool keep_fastcall_stack: 1;
bool changes_pkt_data: 1;
bool might_sleep: 1;
+ u8 arg_cnt:3;
enum priv_stack_mode priv_stack_mode;
- u8 arg_cnt;
struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};
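For reference, the jump-table container referenced throughout this series. Its real definition is not part of this excerpt, so this is a minimal sketch assuming the layout implied by the cnt/items[] usage and the size-2 reallocation below:

/* Sketch (assumed layout): a count followed by a flexible array
 * of successor instruction indices.
 */
struct bpf_iarray {
	int cnt;	/* number of valid entries in items[] */
	u32 items[];	/* successor instruction indices */
};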
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = &prog->insnsi[idx];
const struct opcode_info *opcode_info;
- struct bpf_iarray *succ;
+ struct bpf_iarray *succ, *jt;
int insn_sz;
- if (unlikely(insn_is_gotox(insn)))
- return env->insn_aux_data[idx].jt;
+ jt = env->insn_aux_data[idx].jt;
+ if (unlikely(jt))
+ return jt;
/* pre-allocated array of size up to 2; reset cnt, as it may have been used already */
succ = env->succ;
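A hypothetical consumer of this lookup, assuming the function above is the successor query insn_successors() (that name and the dump_successors helper are illustrative): whichever array comes back, the cached per-insn jump table or the shared env->succ scratch array, it is walked the same way:

/* Hypothetical: print every CFG successor of insn idx. */
static void dump_successors(struct bpf_verifier_env *env, int idx)
{
	struct bpf_iarray *succ = insn_successors(env, idx);
	int i;

	for (i = 0; i < succ->cnt; i++)
		verbose(env, "insn %d -> insn %u\n", idx, succ->items[i]);
}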
subprog[cur_subprog].has_ld_abs = true;
if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
goto next;
- if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
+ if (BPF_OP(code) == BPF_CALL)
goto next;
+ if (BPF_OP(code) == BPF_EXIT) {
+ subprog[cur_subprog].exit_idx = i;
+ goto next;
+ }
off = i + bpf_jmp_offset(&insn[i]) + 1;
if (off < subprog_start || off >= subprog_end) {
verbose(env, "jump out of range from insn %d to %d\n", i, off);
return -EINVAL;
}
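To make the new bookkeeping concrete, a hypothetical two-instruction subprogram built with the standard insn macros from include/linux/filter.h; the BPF_EXIT at index 1 is what the loop above records in exit_idx:

/* Hypothetical subprogram: insn 1 is the BPF_EXIT recorded in
 * this subprog's exit_idx.
 */
struct bpf_insn exit_idx_example[] = {
	BPF_MOV64_IMM(BPF_REG_0, 0),	/* insn 0: r0 = 0 */
	BPF_EXIT_INSN(),		/* insn 1: exit_idx == 1 */
};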
+static int visit_tailcall_insn(struct bpf_verifier_env *env, int t)
+{
+ struct bpf_subprog_info *subprog;
+ struct bpf_iarray *jt;
+
+ if (env->insn_aux_data[t].jt)
+ return 0;
+
+ jt = iarray_realloc(NULL, 2);
+ if (!jt)
+ return -ENOMEM;
+
+ subprog = bpf_find_containing_subprog(env, t);
+ jt->items[0] = t + 1;
+ jt->items[1] = subprog->exit_idx;
+ env->insn_aux_data[t].jt = jt;
+ return 0;
+}
+
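The design choice here: a bpf_tail_call has two control-flow outcomes. If no program is installed at the requested map index, execution falls through to the next instruction; if the tail call succeeds, the current program never regains control, which for CFG purposes is the same as reaching the subprogram's exit. Assuming iarray_realloc(NULL, 2) returns a table with cnt == 2, a hypothetical sanity check (the name and WARN_ON placement are illustrative) spells out the resulting edges:

/* Hypothetical self-check after visit_tailcall_insn(env, t). */
static void check_tailcall_jt(struct bpf_verifier_env *env, int t)
{
	struct bpf_subprog_info *subprog = bpf_find_containing_subprog(env, t);
	struct bpf_iarray *jt = env->insn_aux_data[t].jt;

	WARN_ON(jt->items[0] != t + 1);		    /* failure: fall through */
	WARN_ON(jt->items[1] != subprog->exit_idx); /* success: acts as exit */
}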
/* Visits the instruction at index t and returns one of the following:
* < 0 - an error occurred
* DONE_EXPLORING - the instruction was fully explored
mark_subprog_might_sleep(env, t);
if (bpf_helper_changes_pkt_data(insn->imm))
mark_subprog_changes_pkt_data(env, t);
+		if (insn->imm == BPF_FUNC_tail_call) {
+			ret = visit_tailcall_insn(env, t);
+			if (ret < 0)
+				return ret;
+		}
} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
struct bpf_kfunc_call_arg_meta meta;
int i;
for (i = start; i < end; i++) {
- if (insn_is_gotox(&insns[i])) {
+ if (aux_data[i].jt) {
kvfree(aux_data[i].jt);
aux_data[i].jt = NULL;
}
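A note on the changed predicate: jump tables are now attached to both gotox and bpf_tail_call instructions, so keying the cleanup off the opcode alone would leak the tail-call tables; testing aux_data[i].jt covers every producer. For reference, a sketch of what the old opcode check presumably looked like (assumed shape, not shown in this excerpt): an indirect jump is a BPF_JA with a register source:

static bool insn_is_gotox(struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_JA &&
	       BPF_SRC(insn->code) == BPF_X;
}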