}
}
+/*
+ * Record a stack access of 'access_bytes' at frame pointer offset 'fp_off'
+ * made by a helper/kfunc or load/store insn:
+ * access_bytes > 0: stack read
+ * access_bytes < 0: stack write
+ * access_bytes == S64_MIN: unknown size, conservatively mark all slots
+ * from 'fp_off' up to fp+0 as read
+ * access_bytes == 0: no access
+ */
+static int record_stack_access_off(struct bpf_verifier_env *env,
+ struct func_instance *instance, s64 fp_off,
+ s64 access_bytes, u32 frame, u32 insn_idx)
+{
+ s32 slot_hi, slot_lo;
+ spis_t mask;
+
+ if (fp_off >= 0)
+ /*
+ * An out-of-bounds stack access doesn't contribute
+ * to actual stack liveness. It will be rejected
+ * by the main verifier pass later.
+ */
+ return 0;
+ if (access_bytes == S64_MIN) {
+ /* helper/kfunc reads an unknown number of bytes from fp_off up to fp+0 */
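+ /* e.g. with 8-byte slots, fp_off == -12 marks slots 0 and 1 as read */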
+ slot_hi = (-fp_off - 1) / STACK_SLOT_SZ;
+ mask = SPIS_ZERO;
+ spis_or_range(&mask, 0, slot_hi);
+ return mark_stack_read(instance, frame, insn_idx, mask);
+ }
+ if (access_bytes > 0) {
+ /* Mark every touched slot as a use */
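+ /*
+ * E.g. with 8-byte slots, fp_off == -12 and access_bytes == 8 touch
+ * bytes [fp-12, fp-5], i.e. slots 0 and 1:
+ * slot_hi = 11/8 = 1, slot_lo = max(4/8, 0) = 0.
+ */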
+ slot_hi = (-fp_off - 1) / STACK_SLOT_SZ;
+ slot_lo = max_t(s32, (-fp_off - access_bytes) / STACK_SLOT_SZ, 0);
+ mask = SPIS_ZERO;
+ spis_or_range(&mask, slot_lo, slot_hi);
+ return mark_stack_read(instance, frame, insn_idx, mask);
+ } else if (access_bytes < 0) {
+ /* Mark only fully covered slots as defs */
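+ /*
+ * E.g. with 8-byte slots, an 8-byte write at fp_off == -12 only
+ * partially covers slots 0 and 1, so no def is recorded:
+ * slot_hi = 12/8 - 1 = 0, slot_lo = (12 - 8 + 7)/8 = 1 > slot_hi.
+ */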
+ access_bytes = -access_bytes;
+ slot_hi = (-fp_off) / STACK_SLOT_SZ - 1;
+ slot_lo = max_t(s32, (-fp_off - access_bytes + STACK_SLOT_SZ - 1) / STACK_SLOT_SZ, 0);
+ if (slot_lo <= slot_hi) {
+ mask = SPIS_ZERO;
+ spis_or_range(&mask, slot_lo, slot_hi);
+ bpf_mark_stack_write(env, frame, mask);
+ }
+ }
+ return 0;
+}
+
+/*
+ * 'arg' is an FP-derived argument to a helper/kfunc or load/store that
+ * reads (access_bytes > 0) or writes (access_bytes < 0) stack memory;
+ * record the touched slots as 'use' or 'def' accordingly.
+ */
+static int record_stack_access(struct bpf_verifier_env *env,
+ struct func_instance *instance,
+ const struct arg_track *arg,
+ s64 access_bytes, u32 frame, u32 insn_idx)
+{
+ int i, err;
+
+ if (access_bytes == 0)
+ return 0;
+ if (arg->off_cnt == 0) {
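+ /* offsets are unknown: a read may touch any slot, a write defines none */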
+ if (access_bytes > 0 || access_bytes == S64_MIN)
+ return mark_stack_read(instance, frame, insn_idx, SPIS_ALL);
+ return 0;
+ }
+ if (access_bytes != S64_MIN && access_bytes < 0 && arg->off_cnt != 1)
+ /*
+ * A write through a pointer with multiple possible offsets is
+ * only a may-write at each of them, hence it cannot set stack_def.
+ */
+ return 0;
+
+ for (i = 0; i < arg->off_cnt; i++) {
+ err = record_stack_access_off(env, instance, arg->off[i], access_bytes, frame, insn_idx);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * When a pointer is ARG_IMPRECISE, conservatively mark every frame in
+ * the bitmask as fully used.
+ */
+static int record_imprecise(struct func_instance *instance, u32 mask, u32 insn_idx)
+{
+ int depth = instance->callchain.curframe;
+ int f, err;
+
+ for (f = 0; mask; f++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+ if (f <= depth) {
+ err = mark_stack_read(instance, f, insn_idx, SPIS_ALL);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+/* Record load/store access for a given 'at' state of the insn at 'insn_idx'. */
+static int record_load_store_access(struct bpf_verifier_env *env,
+ struct func_instance *instance,
+ struct arg_track *at, int insn_idx)
+{
+ struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
+ int depth = instance->callchain.curframe;
+ s32 sz = bpf_size_to_bytes(BPF_SIZE(insn->code));
+ u8 class = BPF_CLASS(insn->code);
+ struct arg_track resolved, *ptr;
+ int oi;
+
+ switch (class) {
+ case BPF_LDX:
+ ptr = &at[insn->src_reg];
+ break;
+ case BPF_STX:
+ if (BPF_MODE(insn->code) == BPF_ATOMIC) {
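+ /*
+ * BPF_LOAD_ACQ reads memory through src_reg, BPF_STORE_REL
+ * writes through dst_reg; other atomics (RMW) are conservatively
+ * treated as reads through dst_reg, keeping touched slots live.
+ */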
+ if (insn->imm == BPF_STORE_REL)
+ sz = -sz;
+ if (insn->imm == BPF_LOAD_ACQ)
+ ptr = &at[insn->src_reg];
+ else
+ ptr = &at[insn->dst_reg];
+ } else {
+ ptr = &at[insn->dst_reg];
+ sz = -sz;
+ }
+ break;
+ case BPF_ST:
+ ptr = &at[insn->dst_reg];
+ sz = -sz;
+ break;
+ default:
+ return 0;
+ }
+
+ /* Resolve offsets: fold insn->off into a local copy of the arg_track */
+ if (ptr->off_cnt > 0) {
+ resolved.off_cnt = ptr->off_cnt;
+ resolved.frame = ptr->frame;
+ /* keep mask valid in case 'frame' is ARG_IMPRECISE */
+ resolved.mask = ptr->mask;
+ for (oi = 0; oi < ptr->off_cnt; oi++) {
+ resolved.off[oi] = arg_add(ptr->off[oi], insn->off);
+ if (resolved.off[oi] == OFF_IMPRECISE) {
+ /* unknown resulting offset: fall back to conservative handling */
+ resolved.off_cnt = 0;
+ break;
+ }
+ }
+ ptr = &resolved;
+ }
+
+ if (ptr->frame >= 0 && ptr->frame <= depth)
+ return record_stack_access(env, instance, ptr, sz, ptr->frame, insn_idx);
+ if (ptr->frame == ARG_IMPRECISE)
+ return record_imprecise(instance, ptr->mask, insn_idx);
+ /* ARG_NONE: not derived from any frame pointer, skip */
+ return 0;
+}
+
+/* Record stack access for a given 'at' state of helper/kfunc 'insn' */
+static int record_call_access(struct bpf_verifier_env *env,
+ struct func_instance *instance,
+ struct arg_track *at,
+ int insn_idx)
+{
+ struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
+ int depth = instance->callchain.curframe;
+ struct bpf_call_summary cs;
+ int r, err = 0, num_params = 5;
+
+ if (bpf_pseudo_call(insn))
+ return 0;
+
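+ /* with no call summary available, assume all five argument registers are used */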
+ if (bpf_get_call_summary(env, insn, &cs))
+ num_params = cs.num_params;
+
+ for (r = BPF_REG_1; r < BPF_REG_1 + num_params; r++) {
+ int frame = at[r].frame;
+ s64 bytes;
+
+ if (!arg_is_fp(&at[r]))
+ continue;
+
+ if (bpf_helper_call(insn)) {
+ bytes = bpf_helper_stack_access_bytes(env, insn, r - 1, insn_idx);
+ } else if (bpf_pseudo_kfunc_call(insn)) {
+ bytes = bpf_kfunc_stack_access_bytes(env, insn, r - 1, insn_idx);
+ } else {
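+ /* unknown callee: assume it may read the entire stack of every frame */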
+ for (int f = 0; f <= depth; f++) {
+ err = mark_stack_read(instance, f, insn_idx, SPIS_ALL);
+ if (err)
+ return err;
+ }
+ return 0;
+ }
+ if (bytes == 0)
+ continue;
+
+ if (frame >= 0 && frame <= depth)
+ err = record_stack_access(env, instance, &at[r], bytes, frame, insn_idx);
+ else if (frame == ARG_IMPRECISE)
+ err = record_imprecise(instance, at[r].mask, insn_idx);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
/*
* For a calls_callback helper, find the callback subprog and determine
* which caller register maps to which callback register for FP passthrough.
if (changed)
goto redo;
+ /*
+ * Record memory accesses using the converged at_in state; walking
+ * insn_postorder backwards visits insns in RPO, skipping dead code.
+ */
+ for (p = po_end - 1; p >= po_start; p--) {
+ int idx = env->cfg.insn_postorder[p];
+ int i = idx - start;
+ struct bpf_insn *insn = &insns[idx];
+
+ reset_stack_write_marks(env, instance);
+ err = record_load_store_access(env, instance, at_in[i], idx);
+ if (err)
+ goto err_free;
+
+ if (insn->code == (BPF_JMP | BPF_CALL)) {
+ err = record_call_access(env, instance, at_in[i], idx);
+ if (err)
+ goto err_free;
+ }
+
+ if (bpf_pseudo_call(insn) || bpf_calls_callback(env, idx)) {
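+ /*
+ * Preserve the spilled-argument tracking state observed at
+ * this call site for later lookup by callsite index.
+ */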
+ kvfree(env->callsite_at_stack[idx]);
+ env->callsite_at_stack[idx] =
+ kvmalloc_objs(*env->callsite_at_stack[idx],
+ MAX_ARG_SPILL_SLOTS, GFP_KERNEL_ACCOUNT);
+ if (!env->callsite_at_stack[idx]) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ memcpy(env->callsite_at_stack[idx],
+ at_stack_in[i], sizeof(struct arg_track) * MAX_ARG_SPILL_SLOTS);
+ }
+ err = commit_stack_write_marks(env, instance, idx);
+ if (err)
+ goto err_free;
+ }
+
info->at_in = at_in;
at_in = NULL;
info->len = len;
return err;
dst_state->speculative = src->speculative;
dst_state->in_sleepable = src->in_sleepable;
- dst_state->cleaned = src->cleaned;
dst_state->curframe = src->curframe;
dst_state->branches = src->branches;
dst_state->parent = src->parent;
{
int i, err;
- if (env->cur_state != st)
- st->cleaned = true;
err = bpf_live_stack_query_init(env, st);
if (err)
return err;
return 0;
}
-/* the parentage chains form a tree.
- * the verifier states are added to state lists at given insn and
- * pushed into state stack for future exploration.
- * when the verifier reaches bpf_exit insn some of the verifier states
- * stored in the state lists have their final liveness state already,
- * but a lot of states will get revised from liveness point of view when
- * the verifier explores other branches.
- * Example:
- * 1: *(u64)(r10 - 8) = 1
- * 2: if r1 == 100 goto pc+1
- * 3: *(u64)(r10 - 8) = 2
- * 4: r0 = *(u64)(r10 - 8)
- * 5: exit
- * when the verifier reaches exit insn the stack slot -8 in the state list of
- * insn 2 is not yet marked alive. Then the verifier pops the other_branch
- * of insn 2 and goes exploring further. After the insn 4 read, liveness
- * analysis would propagate read mark for -8 at insn 2.
- *
- * Since the verifier pushes the branch states as it sees them while exploring
- * the program the condition of walking the branch instruction for the second
- * time means that all states below this branch were already explored and
- * their final liveness marks are already propagated.
- * Hence when the verifier completes the search of state list in is_state_visited()
- * we can call this clean_live_states() function to clear dead the registers and stack
- * slots to simplify state merging.
- *
- * Important note here that walking the same branch instruction in the callee
- * doesn't meant that the states are DONE. The verifier has to compare
- * the callsites
- */
-
/* Find id in idset and increment its count, or add new entry */
static void idset_cnt_inc(struct bpf_idset *idset, u32 id)
{
}));
}
-static int clean_live_states(struct bpf_verifier_env *env, int insn,
- struct bpf_verifier_state *cur)
-{
- struct bpf_verifier_state_list *sl;
- struct list_head *pos, *head;
- int err;
-
- head = explored_state(env, insn);
- list_for_each(pos, head) {
- sl = container_of(pos, struct bpf_verifier_state_list, node);
- if (sl->state.branches)
- continue;
- if (sl->state.insn_idx != insn ||
- !same_callsites(&sl->state, cur))
- continue;
- if (sl->state.cleaned)
- /* all regs in this state in all frames were already marked */
- continue;
- if (incomplete_read_marks(env, &sl->state))
- continue;
- err = clean_verifier_state(env, &sl->state);
- if (err)
- return err;
- }
- return 0;
-}
-
static bool regs_exact(const struct bpf_reg_state *rold,
const struct bpf_reg_state *rcur,
struct bpf_idmap *idmap)
env->insn_processed - env->prev_insn_processed >= 8)
add_new_state = true;
- err = clean_live_states(env, insn_idx, cur);
+ /* keep clearing dead registers and stack slots in the current state to simplify state merging */
+ err = clean_verifier_state(env, cur);
if (err)
return err;