return reg->type != SCALAR_VALUE;
}
+/*
+ * Reset a scalar register's linked-register state: the id (which may
+ * carry BPF_ADD_CONST* flags) and the accumulated constant delta are
+ * always cleared together, since delta only has meaning while a linked
+ * id is set.
+ */
+static void clear_scalar_id(struct bpf_reg_state *reg)
+{
+ reg->id = 0;
+ reg->delta = 0;
+}
+
+/*
+ * Make sure a SCALAR_VALUE src_reg carries a valid id before a mov
+ * copies it, so sync_linked_regs() can later propagate min/max bounds
+ * between the copies.  Any pre-existing BPF_ADD_CONST link is dropped
+ * first, since chained rX += const links are not supported.
+ */
static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
struct bpf_reg_state *src_reg)
{
if (src_reg->type != SCALAR_VALUE)
return;
-
- if (src_reg->id & BPF_ADD_CONST) {
- /*
- * The verifier is processing rX = rY insn and
- * rY->id has special linked register already.
- * Cleared it, since multiple rX += const are not supported.
- */
- src_reg->id = 0;
- src_reg->delta = 0;
- }
-
+ /*
+ * The verifier is processing an rX = rY insn and rY->id already
+ * has a special linked register.  Clear it, since multiple
+ * rX += const chains are not supported.
+ */
+ if (src_reg->id & BPF_ADD_CONST)
+ clear_scalar_id(src_reg);
+ /*
+ * Ensure that src_reg has a valid ID that will be copied to
+ * dst_reg and then will be used by sync_linked_regs() to
+ * propagate min/max range.
+ */
if (!src_reg->id && !tnum_is_const(src_reg->var_off))
- /* Ensure that src_reg has a valid ID that will be copied to
- * dst_reg and then will be used by sync_linked_regs() to
- * propagate min/max range.
- */
src_reg->id = ++env->id_gen;
}
* coerce_reg_to_size will adjust the boundaries.
*/
if (get_reg_width(reg) > size * BITS_PER_BYTE)
- state->regs[dst_regno].id = 0;
+ clear_scalar_id(&state->regs[dst_regno]);
} else {
int spill_cnt = 0, zero_cnt = 0;
* any existing ties and avoid incorrect bounds propagation.
*/
if (need_bswap || insn->imm == 16 || insn->imm == 32)
- dst_reg->id = 0;
+ clear_scalar_id(dst_reg);
if (need_bswap) {
if (insn->imm == 16)
* we cannot accumulate another val into rx->off.
*/
clear_id:
- dst_reg->delta = 0;
- dst_reg->id = 0;
+ clear_scalar_id(dst_reg);
} else {
if (alu32)
dst_reg->id |= BPF_ADD_CONST32;
* Make sure ID is cleared otherwise dst_reg min/max could be
* incorrectly propagated into other registers by sync_linked_regs()
*/
- dst_reg->id = 0;
+ clear_scalar_id(dst_reg);
}
return 0;
}
assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
if (!no_sext)
- dst_reg->id = 0;
+ clear_scalar_id(dst_reg);
coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
dst_reg->subreg_def = DEF_NOT_SUBREG;
} else {
* propagated into src_reg by sync_linked_regs()
*/
if (!is_src_reg_u32)
- dst_reg->id = 0;
+ clear_scalar_id(dst_reg);
dst_reg->subreg_def = env->insn_idx + 1;
} else {
/* case: W1 = (s8, s16)W2 */
assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
if (!no_sext)
- dst_reg->id = 0;
+ clear_scalar_id(dst_reg);
dst_reg->subreg_def = env->insn_idx + 1;
coerce_subreg_to_size_sx(dst_reg, insn->off >> 3);
}
e->is_reg = is_reg;
e->regno = spi_or_reg;
} else {
- reg->id = 0;
+ clear_scalar_id(reg);
}
}
continue;
if (!reg->id)
continue;
- if (idset_cnt_get(idset, reg->id & ~BPF_ADD_CONST) == 1) {
- reg->id = 0;
- reg->delta = 0;
- }
+ if (idset_cnt_get(idset, reg->id & ~BPF_ADD_CONST) == 1)
+ clear_scalar_id(reg);
}));
}