};
struct per_frame_masks {
- u64 may_read; /* stack slots that may be read by this instruction */
- u64 must_write; /* stack slots written by this instruction */
- u64 must_write_acc; /* stack slots written by this instruction and its successors */
- u64 live_before; /* stack slots that may be read by this insn and its successors */
+ /*
+ * spis_t masks track the stack at half-slot (4-byte) granularity:
+ * bits 2*spi and 2*spi+1 cover the two halves of 8-byte slot @spi
+ * (see the spis_one_bit()/spis_single_slot() users below).
+ */
+ spis_t may_read; /* stack slots that may be read by this instruction */
+ spis_t must_write; /* stack slots written by this instruction */
+ spis_t must_write_acc; /* stack slots written by this instruction and its successors */
+ spis_t live_before; /* stack slots that may be read by this insn and its successors */
};
/*
* Below fields are used to accumulate stack write marks for instruction at
* @write_insn_idx before submitting the marks to @cur_instance.
*/
- u64 write_masks_acc[MAX_CALL_FRAMES];
+ spis_t write_masks_acc[MAX_CALL_FRAMES];
u32 write_insn_idx;
};
/* Accumulate may_read masks for @frame at @insn_idx */
static int mark_stack_read(struct bpf_verifier_env *env,
- struct func_instance *instance, u32 frame, u32 insn_idx, u64 mask)
+ struct func_instance *instance, u32 frame, u32 insn_idx, spis_t mask)
{
struct per_frame_masks *masks;
- u64 new_may_read;
+ spis_t new_may_read;
masks = alloc_frame_masks(env, instance, frame, insn_idx);
if (IS_ERR(masks))
return PTR_ERR(masks);
- new_may_read = masks->may_read | mask;
- if (new_may_read != masks->may_read &&
- ((new_may_read | masks->live_before) != masks->live_before))
+ new_may_read = spis_or(masks->may_read, mask);
+ /*
+ * Set @updated only when @mask contributes bits present in neither
+ * may_read nor live_before, i.e. the read is genuinely new.
+ */
+ if (!spis_equal(new_may_read, masks->may_read) &&
+ !spis_equal(spis_or(new_may_read, masks->live_before),
+ masks->live_before))
instance->updated = true;
- masks->may_read |= mask;
+ masks->may_read = spis_or(masks->may_read, mask);
return 0;
}
-int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frame, u32 insn_idx, u64 mask)
+int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frame, u32 insn_idx, spis_t mask)
{
int err;
liveness->write_insn_idx = insn_idx;
for (i = 0; i <= instance->callchain.curframe; i++)
- liveness->write_masks_acc[i] = 0;
+ liveness->write_masks_acc[i] = SPIS_ZERO;
}
int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx)
return 0;
}
-void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frame, u64 mask)
+/*
+ * Accumulate stack write marks for @frame into write_masks_acc; the marks
+ * are later submitted for the instruction at @write_insn_idx (see the
+ * commit path reading write_masks_acc).
+ */
+void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frame, spis_t mask)
{
- env->liveness->write_masks_acc[frame] |= mask;
+ env->liveness->write_masks_acc[frame] = spis_or(env->liveness->write_masks_acc[frame], mask);
}
static int commit_stack_write_marks(struct bpf_verifier_env *env,
struct func_instance *instance)
{
struct bpf_liveness *liveness = env->liveness;
- u32 idx, frame, curframe, old_must_write;
+ u32 idx, frame, curframe;
struct per_frame_masks *masks;
- u64 mask;
+ spis_t mask, old_must_write, dropped;
if (!instance)
return 0;
for (frame = 0; frame <= curframe; frame++) {
mask = liveness->write_masks_acc[frame];
/* avoid allocating frames for zero masks */
- if (mask == 0 && !instance->must_write_set[idx])
+ if (spis_is_zero(mask) && !instance->must_write_set[idx])
continue;
masks = alloc_frame_masks(env, instance, frame, liveness->write_insn_idx);
if (IS_ERR(masks))
* to @mask. Otherwise take intersection with the previous value.
*/
if (instance->must_write_set[idx])
- mask &= old_must_write;
- if (old_must_write != mask) {
+ mask = spis_and(mask, old_must_write);
+ if (!spis_equal(old_must_write, mask)) {
masks->must_write = mask;
instance->updated = true;
}
- if (old_must_write & ~mask)
+ /* dropped = old_must_write & ~mask */
+ dropped = spis_and(old_must_write, spis_not(mask));
+ if (!spis_is_zero(dropped))
instance->must_write_dropped = true;
}
instance->must_write_set[idx] = true;
return env->tmp_str_buf;
}
+/*
+ * Format @spis into @buf as a comma-separated list of stack offsets.
+ * When both halves of an 8-byte SPI are set, print as "-8","-16",...
+ * When only one half is set, print the 4-byte half as "-4h","-8h",...
+ */
+static void bpf_fmt_spis_mask(char *buf, ssize_t buf_sz, spis_t spis)
+{
+ bool first = true;
+ int spi, n;
+
+ /* don't touch a zero-sized buffer */
+ if (buf_sz <= 0)
+ return;
+ buf[0] = '\0';
+
+ /* NOTE(review): assumes STACK_SLOTS counts half-slot bits, so the
+ * spi range below covers the whole stack — confirm against the
+ * definition of STACK_SLOTS and spis_t width.
+ */
+ for (spi = 0; spi < STACK_SLOTS / 2; spi++) {
+ bool lo = spis_test_bit(spis, spi * 2);
+ bool hi = spis_test_bit(spis, spi * 2 + 1);
+
+ if (!lo && !hi)
+ continue;
+ n = snprintf(buf, buf_sz, "%s%d%s",
+ first ? "" : ",",
+ -(spi + 1) * BPF_REG_SIZE + (lo && !hi ? BPF_HALF_REG_SIZE : 0),
+ lo && hi ? "" : "h");
+ /*
+ * Stop on error or truncation: advancing by the would-be
+ * length would move @buf past the end of the buffer.
+ */
+ if (n < 0 || n >= buf_sz)
+ break;
+ first = false;
+ buf += n;
+ buf_sz -= n;
+ }
+}
+
static void log_mask_change(struct bpf_verifier_env *env, struct callchain *callchain,
- char *pfx, u32 frame, u32 insn_idx, u64 old, u64 new)
+ char *pfx, u32 frame, u32 insn_idx,
+ spis_t old, spis_t new)
{
- u64 changed_bits = old ^ new;
- u64 new_ones = new & changed_bits;
- u64 new_zeros = ~new & changed_bits;
+ spis_t changed_bits, new_ones, new_zeros;
+
+ changed_bits = spis_xor(old, new);
+ new_ones = spis_and(new, changed_bits);
+ new_zeros = spis_and(spis_not(new), changed_bits);
- if (!changed_bits)
+ if (spis_is_zero(changed_bits))
return;
bpf_log(&env->log, "%s frame %d insn %d ", fmt_callchain(env, callchain), frame, insn_idx);
- if (new_ones) {
- bpf_fmt_stack_mask(env->tmp_str_buf, sizeof(env->tmp_str_buf), new_ones);
+ if (!spis_is_zero(new_ones)) {
+ bpf_fmt_spis_mask(env->tmp_str_buf, sizeof(env->tmp_str_buf), new_ones);
bpf_log(&env->log, "+%s %s ", pfx, env->tmp_str_buf);
}
- if (new_zeros) {
- bpf_fmt_stack_mask(env->tmp_str_buf, sizeof(env->tmp_str_buf), new_zeros);
+ if (!spis_is_zero(new_zeros)) {
+ bpf_fmt_spis_mask(env->tmp_str_buf, sizeof(env->tmp_str_buf), new_zeros);
bpf_log(&env->log, "-%s %s", pfx, env->tmp_str_buf);
}
bpf_log(&env->log, "\n");
struct func_instance *instance, u32 frame, u32 insn_idx)
{
struct bpf_insn_aux_data *aux = env->insn_aux_data;
- u64 new_before, new_after, must_write_acc;
+ spis_t new_before, new_after, must_write_acc;
struct per_frame_masks *insn, *succ_insn;
struct bpf_iarray *succ;
u32 s;
changed = false;
insn = get_frame_masks(instance, frame, insn_idx);
- new_before = 0;
- new_after = 0;
+ new_before = SPIS_ZERO;
+ new_after = SPIS_ZERO;
/*
* New "must_write_acc" is an intersection of all "must_write_acc"
* of successors plus all "must_write" slots of instruction itself.
*/
- must_write_acc = U64_MAX;
+ must_write_acc = SPIS_ALL;
for (s = 0; s < succ->cnt; ++s) {
succ_insn = get_frame_masks(instance, frame, succ->items[s]);
- new_after |= succ_insn->live_before;
- must_write_acc &= succ_insn->must_write_acc;
+ new_after = spis_or(new_after, succ_insn->live_before);
+ must_write_acc = spis_and(must_write_acc, succ_insn->must_write_acc);
}
- must_write_acc |= insn->must_write;
+ must_write_acc = spis_or(must_write_acc, insn->must_write);
/*
* New "live_before" is a union of all "live_before" of successors
* minus slots written by instruction plus slots read by instruction.
+ * new_before = (new_after & ~insn->must_write) | insn->may_read
*/
- new_before = (new_after & ~insn->must_write) | insn->may_read;
- changed |= new_before != insn->live_before;
- changed |= must_write_acc != insn->must_write_acc;
+ new_before = spis_or(spis_and(new_after, spis_not(insn->must_write)),
+ insn->may_read);
+ changed |= !spis_equal(new_before, insn->live_before);
+ changed |= !spis_equal(must_write_acc, insn->must_write_acc);
if (unlikely(env->log.level & BPF_LOG_LEVEL2) &&
- (insn->may_read || insn->must_write ||
+ (!spis_is_zero(insn->may_read) || !spis_is_zero(insn->must_write) ||
insn_idx == callchain_subprog_start(&instance->callchain) ||
aux[insn_idx].prune_point)) {
log_mask_change(env, &instance->callchain, "live",
for (i = 0; i < instance->insn_cnt; i++) {
insn = get_frame_masks(instance, frame, this_subprog_start + i);
- insn->must_write_acc = 0;
+ insn->must_write_acc = SPIS_ZERO;
}
}
}
struct per_frame_masks *masks;
masks = get_frame_masks(instance, frameno, insn_idx);
- return masks && (masks->live_before & BIT(spi));
+ return masks && (spis_test_bit(masks->live_before, spi * 2) ||
+ spis_test_bit(masks->live_before, spi * 2 + 1));
}
int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
}
- bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
+ bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi));
+ bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - 1));
return 0;
}
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
- bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
+ bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi));
+ bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - 1));
}
static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
- bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
+ bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi));
+ bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - 1));
return 0;
}
for (j = 0; j < BPF_REG_SIZE; j++)
slot->slot_type[j] = STACK_ITER;
- bpf_mark_stack_write(env, state->frameno, BIT(spi - i));
+ bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - i));
mark_stack_slot_scratched(env, spi - i);
}
for (j = 0; j < BPF_REG_SIZE; j++)
slot->slot_type[j] = STACK_INVALID;
- bpf_mark_stack_write(env, state->frameno, BIT(spi - i));
+ bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - i));
mark_stack_slot_scratched(env, spi - i);
}
slot = &state->stack[spi];
st = &slot->spilled_ptr;
- bpf_mark_stack_write(env, reg->frameno, BIT(spi));
+ bpf_mark_stack_write(env, reg->frameno, spis_single_slot(spi));
__mark_reg_known_zero(st);
st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
st->ref_obj_id = id;
__mark_reg_not_init(env, st);
- bpf_mark_stack_write(env, reg->frameno, BIT(spi));
+ bpf_mark_stack_write(env, reg->frameno, spis_single_slot(spi));
for (i = 0; i < BPF_REG_SIZE; i++)
slot->slot_type[i] = STACK_INVALID;
int err, i;
for (i = 0; i < nr_slots; i++) {
- err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi - i));
+ err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx,
+ spis_single_slot(spi - i));
if (err)
return err;
mark_stack_slot_scratched(env, spi - i);
if (err)
return err;
- if (!(off % BPF_REG_SIZE) && size == BPF_REG_SIZE) {
- /* only mark the slot as written if all 8 bytes were written
- * otherwise read propagation may incorrectly stop too soon
- * when stack slots are partially written.
- * This heuristic means that read propagation will be
- * conservative, since it will add reg_live_read marks
- * to stack slots all the way to first state when programs
- * writes+reads less than 8 bytes
- */
- bpf_mark_stack_write(env, state->frameno, BIT(spi));
- }
+ if (!(off % BPF_REG_SIZE) && size == BPF_REG_SIZE)
+ /* 8-byte aligned, 8-byte write */
+ bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi));
+ else if (!(off % BPF_REG_SIZE) && size == BPF_HALF_REG_SIZE)
+ /* 8-byte aligned, 4-byte write */
+ bpf_mark_stack_write(env, state->frameno, spis_one_bit(spi * 2 + 1));
+ else if (!(off % BPF_HALF_REG_SIZE) && size == BPF_HALF_REG_SIZE)
+ /* 4-byte aligned, 4-byte write */
+ bpf_mark_stack_write(env, state->frameno, spis_one_bit(spi * 2));
check_fastcall_stack_contract(env, state, insn_idx, off);
mark_stack_slot_scratched(env, spi);
struct bpf_reg_state *reg;
u8 *stype, type;
int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
+ spis_t mask;
int err;
stype = reg_state->stack[spi].slot_type;
mark_stack_slot_scratched(env, spi);
check_fastcall_stack_contract(env, state, env->insn_idx, off);
- err = bpf_mark_stack_read(env, reg_state->frameno, env->insn_idx, BIT(spi));
+ if (!(off % BPF_REG_SIZE) && size == BPF_HALF_REG_SIZE)
+ /* 8-byte aligned, 4-byte read */
+ mask = spis_one_bit(spi * 2 + 1);
+ else if (!(off % BPF_HALF_REG_SIZE) && size == BPF_HALF_REG_SIZE)
+ /* 4-byte aligned, 4-byte read */
+ mask = spis_one_bit(spi * 2);
+ else
+ mask = spis_single_slot(spi);
+
+ err = bpf_mark_stack_read(env, reg_state->frameno, env->insn_idx, mask);
if (err)
return err;
/* reading any byte out of 8-byte 'spill_slot' will cause
* the whole slot to be marked as 'read'
*/
- err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi));
+ err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx,
+ spis_single_slot(spi));
if (err)
return err;
/* We do not call bpf_mark_stack_write(), as we can not