		       const struct bpf_reg_state *rcur,
		       struct bpf_idmap *idmap);
+/*
+ * Check if scalar registers are exact for the purpose of skipping
+ * widening. More lenient than regs_exact(): only the fields before
+ * 'id' are byte-compared, so register IDs are ignored and no idmap
+ * is needed.
+ */
+static bool scalars_exact_for_widen(const struct bpf_reg_state *rold,
+				    const struct bpf_reg_state *rcur)
+{
+	return !memcmp(rold, rcur, offsetof(struct bpf_reg_state, id));
+}
+
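The offsetof()-bounded memcmp() above compares every field declared before
'id' in struct bpf_reg_state and ignores 'id' and everything after it. A
minimal standalone sketch of the idiom, with a made-up struct toy_reg in
place of bpf_reg_state (illustration only, not verifier code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in: fields before 'id' hold the tracked value state,
 * 'id' is identity metadata that the comparison must ignore. */
struct toy_reg {
	long smin, smax;	/* value-tracking state (compared) */
	unsigned int id;	/* identity metadata (ignored) */
};

/* Same idiom as scalars_exact_for_widen(): byte-compare only the
 * fields laid out before 'id'. */
static bool toy_exact(const struct toy_reg *a, const struct toy_reg *b)
{
	return !memcmp(a, b, offsetof(struct toy_reg, id));
}

int main(void)
{
	struct toy_reg a = { .smin = 0, .smax = 5, .id = 1 };
	struct toy_reg b = { .smin = 0, .smax = 5, .id = 7 };
	struct toy_reg c = { .smin = 0, .smax = 9, .id = 1 };

	printf("a~b: %d\n", toy_exact(&a, &b));	/* 1: ids differ, still "exact" */
	printf("a~c: %d\n", toy_exact(&a, &c));	/* 0: value ranges differ */
	return 0;
}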
static void maybe_widen_reg(struct bpf_verifier_env *env,
-			    struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
-			    struct bpf_idmap *idmap)
+			    struct bpf_reg_state *rold, struct bpf_reg_state *rcur)
{
	if (rold->type != SCALAR_VALUE)
		return;
	if (rold->type != rcur->type)
		return;
-	if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap))
+	if (rold->precise || rcur->precise || scalars_exact_for_widen(rold, rcur))
		return;
	__mark_reg_unknown(env, rcur);
}
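Net effect: only pairs of scalar registers that are neither precise nor
byte-identical get widened to "unknown". A toy model of that decision,
with made-up names (toy_scalar, toy_maybe_widen) and a plain min/max
range standing in for the verifier's full value-tracking state:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a scalar register's tracked state. */
struct toy_scalar {
	long min, max;	/* tracked value range */
	bool precise;	/* some check depended on the exact value */
};

/* Mirrors the shape of maybe_widen_reg(): keep precise or identical
 * states, widen everything else. */
static void toy_maybe_widen(const struct toy_scalar *old, struct toy_scalar *cur)
{
	if (old->precise || cur->precise ||
	    (old->min == cur->min && old->max == cur->max))
		return;
	cur->min = LONG_MIN;	/* __mark_reg_unknown() analogue */
	cur->max = LONG_MAX;
}

int main(void)
{
	/* A loop induction variable: 0 on the first pass, 1 on the second. */
	struct toy_scalar old = { .min = 0, .max = 0, .precise = false };
	struct toy_scalar cur = { .min = 1, .max = 1, .precise = false };

	toy_maybe_widen(&old, &cur);
	/* cur is now [LONG_MIN, LONG_MAX]: the widened state subsumes
	 * passes 3, 4, ... so loop verification can converge. */
	printf("[%ld, %ld]\n", cur.min, cur.max);
	return 0;
}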
	struct bpf_func_state *fold, *fcur;
	int i, fr, num_slots;
-	reset_idmap_scratch(env);
	for (fr = old->curframe; fr >= 0; fr--) {
		fold = old->frame[fr];
		fcur = cur->frame[fr];
		for (i = 0; i < MAX_BPF_REG; i++)
			maybe_widen_reg(env,
					&fold->regs[i],
-					&fcur->regs[i],
-					&env->idmap_scratch);
+					&fcur->regs[i]);
		num_slots = min(fold->allocated_stack / BPF_REG_SIZE,
				fcur->allocated_stack / BPF_REG_SIZE);
		for (i = 0; i < num_slots; i++) {
			if (!is_spilled_reg(&fold->stack[i]) ||
			    !is_spilled_reg(&fcur->stack[i]))
				continue;
			maybe_widen_reg(env,
					&fold->stack[i].spilled_ptr,
-					&fcur->stack[i].spilled_ptr,
-					&env->idmap_scratch);
+					&fcur->stack[i].spilled_ptr);
		}
	}
	return 0;
}
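For context, this widening is what lets loops driven by iterator kfuncs
converge during verification. A hedged sketch of such a loop (it assumes
the upstream bpf_iter_num_* kfuncs; the program and section name are
illustrative and not part of this patch):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;

char _license[] SEC("license") = "GPL";

SEC("raw_tp")
int iter_widening_example(void *ctx)
{
	struct bpf_iter_num it;
	int i = 0, sum = 0;

	bpf_iter_num_new(&it, 0, 100);
	while (bpf_iter_num_next(&it)) {
		/* 'i' is 0 on the first simulated pass, 1 on the second, and
		 * so on. Assuming nothing marks 'i' precise, widening resets
		 * it to unknown, so the second pass subsumes all later ones
		 * instead of the verifier enumerating every iteration. */
		i++;
		sum += i;
	}
	bpf_iter_num_destroy(&it);
	return sum;
}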