From: Greg Kroah-Hartman Date: Mon, 2 Aug 2021 11:31:51 +0000 (+0200) Subject: 5.10-stable patches X-Git-Tag: v4.4.278~29 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=80b819956eb403fe0c446bdaee3703c3bacc7440;p=thirdparty%2Fkernel%2Fstable-queue.git 5.10-stable patches added patches: bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch --- diff --git a/queue-5.10/bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch b/queue-5.10/bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch new file mode 100644 index 00000000000..717ca0d70c3 --- /dev/null +++ b/queue-5.10/bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch @@ -0,0 +1,121 @@ +From e042aa532c84d18ff13291d00620502ce7a38dda Mon Sep 17 00:00:00 2001 +From: Daniel Borkmann +Date: Fri, 16 Jul 2021 09:18:21 +0000 +Subject: bpf: Fix pointer arithmetic mask tightening under state pruning + +From: Daniel Borkmann + +commit e042aa532c84d18ff13291d00620502ce7a38dda upstream. + +In 7fedb63a8307 ("bpf: Tighten speculative pointer arithmetic mask") we +narrowed the offset mask for unprivileged pointer arithmetic in order to +mitigate a corner case where in the speculative domain it is possible to +advance, for example, the map value pointer by up to value_size-1 out-of- +bounds in order to leak kernel memory via side-channel to user space. + +The verifier's state pruning for scalars leaves one corner case open +where in the first verification path R_x holds an unknown scalar with an +aux->alu_limit of e.g. 7, and in a second verification path that same +register R_x, here denoted as R_x', holds an unknown scalar which has +tighter bounds and would thus satisfy range_within(R_x, R_x') as well as +tnum_in(R_x, R_x') for state pruning, yielding an aux->alu_limit of 3: +Given the second path fits the register constraints for pruning, the final +generated mask from aux->alu_limit will remain at 7. While technically +not wrong for the non-speculative domain, it would however be possible +to craft similar cases where the mask would be too wide as in 7fedb63a8307. + +One way to fix it is to detect the presence of unknown scalar map pointer +arithmetic and force a deeper search on unknown scalars to ensure that +we do not run into a masking mismatch. + +Signed-off-by: Daniel Borkmann +Acked-by: Alexei Starovoitov +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf_verifier.h | 1 + + kernel/bpf/verifier.c | 27 +++++++++++++++++---------- + 2 files changed, 18 insertions(+), 10 deletions(-) + +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -397,6 +397,7 @@ struct bpf_verifier_env { + struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ + u32 used_map_cnt; /* number of used maps */ + u32 id_gen; /* used to generate unique reg IDs */ ++ bool explore_alu_limits; + bool allow_ptr_leaks; + bool allow_uninit_stack; + bool allow_ptr_to_map_access; +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -5792,6 +5792,12 @@ static int sanitize_ptr_alu(struct bpf_v + alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; + alu_state |= ptr_is_dst_reg ? + BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; ++ ++ /* Limit pruning on unknown scalars to enable deep search for ++ * potential masking differences from other program paths. 
++ */ ++ if (!off_is_imm) ++ env->explore_alu_limits = true; + } + + err = update_alu_sanitation_state(aux, alu_state, alu_limit); +@@ -9088,8 +9094,8 @@ next: + } + + /* Returns true if (rold safe implies rcur safe) */ +-static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, +- struct bpf_id_pair *idmap) ++static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, ++ struct bpf_reg_state *rcur, struct bpf_id_pair *idmap) + { + bool equal; + +@@ -9115,6 +9121,8 @@ static bool regsafe(struct bpf_reg_state + return false; + switch (rold->type) { + case SCALAR_VALUE: ++ if (env->explore_alu_limits) ++ return false; + if (rcur->type == SCALAR_VALUE) { + if (!rold->precise && !rcur->precise) + return true; +@@ -9204,9 +9212,8 @@ static bool regsafe(struct bpf_reg_state + return false; + } + +-static bool stacksafe(struct bpf_func_state *old, +- struct bpf_func_state *cur, +- struct bpf_id_pair *idmap) ++static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, ++ struct bpf_func_state *cur, struct bpf_id_pair *idmap) + { + int i, spi; + +@@ -9251,9 +9258,8 @@ static bool stacksafe(struct bpf_func_st + continue; + if (old->stack[spi].slot_type[0] != STACK_SPILL) + continue; +- if (!regsafe(&old->stack[spi].spilled_ptr, +- &cur->stack[spi].spilled_ptr, +- idmap)) ++ if (!regsafe(env, &old->stack[spi].spilled_ptr, ++ &cur->stack[spi].spilled_ptr, idmap)) + /* when explored and current stack slot are both storing + * spilled registers, check that stored pointers types + * are the same as well. +@@ -9310,10 +9316,11 @@ static bool func_states_equal(struct bpf + + memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch)); + for (i = 0; i < MAX_BPF_REG; i++) +- if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch)) ++ if (!regsafe(env, &old->regs[i], &cur->regs[i], ++ env->idmap_scratch)) + return false; + +- if (!stacksafe(old, cur, env->idmap_scratch)) ++ if (!stacksafe(env, old, cur, env->idmap_scratch)) + return false; + + if (!refsafe(old, cur)) diff --git a/queue-5.10/bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch b/queue-5.10/bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch new file mode 100644 index 00000000000..020ce21220e --- /dev/null +++ b/queue-5.10/bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch @@ -0,0 +1,149 @@ +From c9e73e3d2b1eb1ea7ff068e05007eec3bd8ef1c9 Mon Sep 17 00:00:00 2001 +From: Lorenz Bauer +Date: Thu, 29 Apr 2021 14:46:56 +0100 +Subject: bpf: verifier: Allocate idmap scratch in verifier env + +From: Lorenz Bauer + +commit c9e73e3d2b1eb1ea7ff068e05007eec3bd8ef1c9 upstream. + +func_states_equal makes a very short lived allocation for idmap, +probably because it's too large to fit on the stack. However the +function is called quite often, leading to a lot of alloc / free +churn. Replace the temporary allocation with dedicated scratch +space in struct bpf_verifier_env. 
+ +Signed-off-by: Lorenz Bauer +Signed-off-by: Alexei Starovoitov +Acked-by: Edward Cree +Link: https://lore.kernel.org/bpf/20210429134656.122225-4-lmb@cloudflare.com +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf_verifier.h | 8 +++++++ + kernel/bpf/verifier.c | 46 ++++++++++++++----------------------------- + 2 files changed, 23 insertions(+), 31 deletions(-) + +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -204,6 +204,13 @@ struct bpf_idx_pair { + u32 idx; + }; + ++struct bpf_id_pair { ++ u32 old; ++ u32 cur; ++}; ++ ++/* Maximum number of register states that can exist at once */ ++#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) + #define MAX_CALL_FRAMES 8 + struct bpf_verifier_state { + /* call stack tracking */ +@@ -401,6 +408,7 @@ struct bpf_verifier_env { + const struct bpf_line_info *prev_linfo; + struct bpf_verifier_log log; + struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; ++ struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE]; + struct { + int *insn_state; + int *insn_stack; +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -8962,13 +8962,6 @@ static bool range_within(struct bpf_reg_ + old->s32_max_value >= cur->s32_max_value; + } + +-/* Maximum number of register states that can exist at once */ +-#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) +-struct idpair { +- u32 old; +- u32 cur; +-}; +- + /* If in the old state two registers had the same id, then they need to have + * the same id in the new state as well. But that id could be different from + * the old state, so we need to track the mapping from old to new ids. +@@ -8979,11 +8972,11 @@ struct idpair { + * So we look through our idmap to see if this old id has been seen before. If + * so, we require the new id to match; otherwise, we add the id pair to the map. 
+ */ +-static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) ++static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap) + { + unsigned int i; + +- for (i = 0; i < ID_MAP_SIZE; i++) { ++ for (i = 0; i < BPF_ID_MAP_SIZE; i++) { + if (!idmap[i].old) { + /* Reached an empty slot; haven't seen this id before */ + idmap[i].old = old_id; +@@ -9096,7 +9089,7 @@ next: + + /* Returns true if (rold safe implies rcur safe) */ + static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, +- struct idpair *idmap) ++ struct bpf_id_pair *idmap) + { + bool equal; + +@@ -9213,7 +9206,7 @@ static bool regsafe(struct bpf_reg_state + + static bool stacksafe(struct bpf_func_state *old, + struct bpf_func_state *cur, +- struct idpair *idmap) ++ struct bpf_id_pair *idmap) + { + int i, spi; + +@@ -9310,32 +9303,23 @@ static bool refsafe(struct bpf_func_stat + * whereas register type in current state is meaningful, it means that + * the current state will reach 'bpf_exit' instruction safely + */ +-static bool func_states_equal(struct bpf_func_state *old, ++static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, + struct bpf_func_state *cur) + { +- struct idpair *idmap; +- bool ret = false; + int i; + +- idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); +- /* If we failed to allocate the idmap, just say it's not safe */ +- if (!idmap) +- return false; +- +- for (i = 0; i < MAX_BPF_REG; i++) { +- if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) +- goto out_free; +- } ++ memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch)); ++ for (i = 0; i < MAX_BPF_REG; i++) ++ if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch)) ++ return false; + +- if (!stacksafe(old, cur, idmap)) +- goto out_free; ++ if (!stacksafe(old, cur, env->idmap_scratch)) ++ return false; + + if (!refsafe(old, cur)) +- goto out_free; +- ret = true; +-out_free: +- kfree(idmap); +- return ret; ++ return false; ++ ++ return true; + } + + static bool states_equal(struct bpf_verifier_env *env, +@@ -9362,7 +9346,7 @@ static bool states_equal(struct bpf_veri + for (i = 0; i <= old->curframe; i++) { + if (old->frame[i]->callsite != cur->frame[i]->callsite) + return false; +- if (!func_states_equal(old->frame[i], cur->frame[i])) ++ if (!func_states_equal(env, old->frame[i], cur->frame[i])) + return false; + } + return true; diff --git a/queue-5.10/series b/queue-5.10/series index d431cad8c25..231c1e1b1d2 100644 --- a/queue-5.10/series +++ b/queue-5.10/series @@ -58,3 +58,5 @@ can-hi311x-fix-a-signedness-bug-in-hi3110_cmd.patch bpf-introduce-bpf-nospec-instruction-for-mitigating-.patch bpf-fix-leakage-due-to-insufficient-speculative-stor.patch bpf-remove-superfluous-aux-sanitation-on-subprog-rejection.patch +bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch +bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch
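
For readers skimming the queue, the allocation change in bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch boils down to the pattern below. This is a minimal, self-contained userspace sketch of the idea only - every name in it (struct env, struct id_pair, ID_MAP_SIZE, states_equal) is an illustrative stand-in rather than the kernel API - showing why a fixed scratch array owned by a long-lived environment avoids the kcalloc()/kfree() churn of building a fresh idmap on every func_states_equal() call.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ID_MAP_SIZE 64			/* stand-in for BPF_ID_MAP_SIZE */

struct id_pair {
	unsigned int old;
	unsigned int cur;
};

struct env {
	/* scratch reused across calls instead of kcalloc()/kfree() each time */
	struct id_pair idmap_scratch[ID_MAP_SIZE];
};

/* Require a consistent old-id -> cur-id mapping, as check_ids() does. */
static bool check_ids(unsigned int old_id, unsigned int cur_id,
		      struct id_pair *idmap)
{
	for (unsigned int i = 0; i < ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			/* empty slot: remember this pairing */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id;
	}
	return false;			/* map full: conservatively not equal */
}

static bool states_equal(struct env *env, const unsigned int *old_ids,
			 const unsigned int *cur_ids, unsigned int n)
{
	/* reset the scratch area instead of allocating a fresh idmap */
	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
	for (unsigned int i = 0; i < n; i++)
		if (!check_ids(old_ids[i], cur_ids[i], env->idmap_scratch))
			return false;
	return true;
}

int main(void)
{
	static struct env env;
	unsigned int old[] = { 1, 2, 1 };
	unsigned int ok[]  = { 5, 7, 5 };	/* consistent: 1->5, 2->7 */
	unsigned int bad[] = { 5, 7, 9 };	/* inconsistent: 1 maps to 5 and 9 */

	printf("%d\n", states_equal(&env, old, ok, 3));	/* prints 1 */
	printf("%d\n", states_equal(&env, old, bad, 3));	/* prints 0 */
	return 0;
}

The trade-off the patch makes is the same as in this sketch: a fixed per-env footprint (BPF_ID_MAP_SIZE entries of scratch) in exchange for removing an allocation and a possible allocation failure from a comparison that runs once per pruning check.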