state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
return 0;
}
*/
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
}
static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
/* Same reason as unmark_stack_slots_dynptr above */
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
return 0;
}
for (j = 0; j < BPF_REG_SIZE; j++)
slot->slot_type[j] = STACK_ITER;
+ bpf_mark_stack_write(env, state->frameno, BIT(spi - i));
mark_stack_slot_scratched(env, spi - i);
}
for (j = 0; j < BPF_REG_SIZE; j++)
slot->slot_type[j] = STACK_INVALID;
+ bpf_mark_stack_write(env, state->frameno, BIT(spi - i));
mark_stack_slot_scratched(env, spi - i);
}
slot = &state->stack[spi];
st = &slot->spilled_ptr;
+ bpf_mark_stack_write(env, reg->frameno, BIT(spi));
__mark_reg_known_zero(st);
st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
st->live |= REG_LIVE_WRITTEN;
/* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
st->live |= REG_LIVE_WRITTEN;
+ bpf_mark_stack_write(env, reg->frameno, BIT(spi));
for (i = 0; i < BPF_REG_SIZE; i++)
slot->slot_type[i] = STACK_INVALID;
if (err)
return err;
+ err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi - i));
+ if (err)
+ return err;
mark_stack_slot_scratched(env, spi - i);
}
return 0;
if (err)
return err;
+ if (!(off % BPF_REG_SIZE) && size == BPF_REG_SIZE) {
+ /* only mark the slot as written if all 8 bytes were written,
+ * otherwise read propagation may incorrectly stop too soon
+ * when stack slots are partially written.
+ * This heuristic means that read propagation will be
+ * conservative, since it will add reg_live_read marks
+ * to stack slots all the way to the first state when a program
+ * writes+reads less than 8 bytes.
+ */
+ bpf_mark_stack_write(env, state->frameno, BIT(spi));
+ }
+
check_fastcall_stack_contract(env, state, insn_idx, off);
mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
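(Illustrative aside, not part of the patch: the comment above motivates marking a slot as written only for full, aligned 8-byte stores. Below is a minimal standalone sketch of the slot-index and mask arithmetic those calls rely on, assuming the verifier's usual spi = (-off - 1) / BPF_REG_SIZE convention; the u64 mask, BIT macro and main() harness are illustration-only and not the kernel implementation.)

#include <stdint.h>
#include <stdio.h>

#define BPF_REG_SIZE 8
#define BIT(n) (1ULL << (n))

/* slot index for a negative stack offset: off = -8 maps to spi = 0 */
static int get_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	int off = -16, size = 8;
	uint64_t mask = 0;

	/* only a full, aligned 8-byte store contributes a write mark;
	 * a partial write leaves the mask empty, so read marks keep
	 * propagating conservatively for that slot
	 */
	if (!(off % BPF_REG_SIZE) && size == BPF_REG_SIZE)
		mask |= BIT(get_spi(off));

	printf("off=%d size=%d -> spi=%d mask=%#llx\n",
	       off, size, get_spi(off), (unsigned long long)mask);
	return 0;
}

For the two-slot dynptr cases earlier in the patch, the same idea yields a combined mask of BIT(spi - 1) | BIT(spi).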
struct bpf_reg_state *reg;
u8 *stype, type;
int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
+ int err;
stype = reg_state->stack[spi].slot_type;
reg = &reg_state->stack[spi].spilled_ptr;
mark_stack_slot_scratched(env, spi);
check_fastcall_stack_contract(env, state, env->insn_idx, off);
+ err = bpf_mark_stack_read(env, reg_state->frameno, env->insn_idx, BIT(spi));
+ if (err)
+ return err;
if (is_spilled_reg(&reg_state->stack[spi])) {
u8 spill_size = 1;
mark_reg_read(env, &state->stack[spi].spilled_ptr,
state->stack[spi].spilled_ptr.parent,
REG_LIVE_READ64);
+ err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi));
+ if (err)
+ return err;
/* We do not set REG_LIVE_WRITTEN for stack slot, as we can not
* be sure whether the stack slot is written to or not. Hence,
* we must still conservatively propagate reads upwards even if
/* and go analyze first insn of the callee */
*insn_idx = env->subprog_info[subprog].start - 1;
+ bpf_reset_live_stack_callchain(env);
+
if (env->log.level & BPF_LOG_LEVEL) {
verbose(env, "caller:\n");
print_verifier_state(env, state, caller->frameno, true);
u32 ip)
{
u16 live_regs = env->insn_aux_data[ip].live_regs_before;
- enum bpf_reg_liveness live;
int i, j;
for (i = 0; i < BPF_REG_FP; i++) {
}
for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
- live = st->stack[i].spilled_ptr.live;
- /* liveness must not touch this stack slot anymore */
- if (!(live & REG_LIVE_READ)) {
+ if (!bpf_stack_slot_alive(env, st->frameno, i)) {
__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
for (j = 0; j < BPF_REG_SIZE; j++)
st->stack[i].slot_type[j] = STACK_INVALID;
{
int i, ip;
+ bpf_live_stack_query_init(env, st);
st->cleaned = true;
for (i = 0; i <= st->curframe; i++) {
ip = frame_insn_idx(st, i);
if (exact == EXACT)
return regs_exact(rold, rcur, idmap);
- if (!(rold->live & REG_LIVE_READ) && exact == NOT_EXACT)
- /* explored state didn't use this */
- return true;
if (rold->type == NOT_INIT) {
if (exact == NOT_EXACT || rcur->type == NOT_INIT)
/* explored state can't have used this */
return PROCESS_BPF_EXIT;
if (env->cur_state->curframe) {
+ err = bpf_update_live_stack(env);
+ if (err)
+ return err;
/* exit from nested function */
err = prepare_func_exit(env, &env->insn_idx);
if (err)
for (;;) {
struct bpf_insn *insn;
struct bpf_insn_aux_data *insn_aux;
- int err;
+ int err, marks_err;
/* reset current history entry on each new instruction */
env->cur_hist_ent = NULL;
if (state->speculative && insn_aux->nospec)
goto process_bpf_exit;
+ err = bpf_reset_stack_write_marks(env, env->insn_idx);
+ if (err)
+ return err;
err = do_check_insn(env, &do_print_state);
+ if (err >= 0 || error_recoverable_with_nospec(err)) {
+ marks_err = bpf_commit_stack_write_marks(env);
+ if (marks_err)
+ return marks_err;
+ }
if (error_recoverable_with_nospec(err) && state->speculative) {
/* Prevent this speculative path from ever reaching the
* insn that would have been unsafe to execute.
process_bpf_exit:
mark_verifier_state_scratched(env);
err = update_branch_counts(env, env->cur_state);
+ if (err)
+ return err;
+ err = bpf_update_live_stack(env);
if (err)
return err;
err = pop_stack(env, &prev_insn_idx, &env->insn_idx,
if (ret < 0)
goto skip_full_check;
+ ret = bpf_stack_liveness_init(env);
+ if (ret)
+ goto skip_full_check;
+
ret = check_attach_btf_id(env);
if (ret)
goto skip_full_check;
mutex_unlock(&bpf_verifier_lock);
vfree(env->insn_aux_data);
err_free_env:
+ bpf_stack_liveness_free(env);
kvfree(env->cfg.insn_postorder);
kvfree(env->scc_info);
kvfree(env);