git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.13-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 2 Aug 2021 11:32:06 +0000 (13:32 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 2 Aug 2021 11:32:06 +0000 (13:32 +0200)
added patches:
bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch
bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch

queue-5.13/bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch [new file with mode: 0644]
queue-5.13/bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch [new file with mode: 0644]
queue-5.13/series

diff --git a/queue-5.13/bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch b/queue-5.13/bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch
new file mode 100644 (file)
index 0000000..5a2ce44
--- /dev/null
@@ -0,0 +1,121 @@
+From e042aa532c84d18ff13291d00620502ce7a38dda Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Fri, 16 Jul 2021 09:18:21 +0000
+Subject: bpf: Fix pointer arithmetic mask tightening under state pruning
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit e042aa532c84d18ff13291d00620502ce7a38dda upstream.
+
+In 7fedb63a8307 ("bpf: Tighten speculative pointer arithmetic mask") we
+narrowed the offset mask for unprivileged pointer arithmetic in order to
+mitigate a corner case where, in the speculative domain, it is possible to
+advance, for example, the map value pointer by up to value_size-1 out of
+bounds and leak kernel memory via a side channel to user space.
+
+The verifier's state pruning for scalars leaves one corner case open:
+in a first verification path R_x holds an unknown scalar with an
+aux->alu_limit of e.g. 7, and in a second verification path that same
+register R_x, here denoted R_x', holds an unknown scalar with tighter
+bounds which would thus satisfy range_within(R_x, R_x') as well as
+tnum_in(R_x, R_x') for state pruning, yielding an aux->alu_limit of 3.
+Since the second path fits the register constraints for pruning, the final
+generated mask from aux->alu_limit will remain at 7. While technically
+not wrong for the non-speculative domain, it would, however, be possible
+to craft similar cases where the mask ends up too wide, as in 7fedb63a8307.
+
+One way to fix it is to detect the presence of unknown scalar map pointer
+arithmetic and force a deeper search on unknown scalars to ensure that
+we do not run into a masking mismatch.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf_verifier.h |    1 +
+ kernel/bpf/verifier.c        |   27 +++++++++++++++++----------
+ 2 files changed, 18 insertions(+), 10 deletions(-)
+
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -414,6 +414,7 @@ struct bpf_verifier_env {
+       u32 used_map_cnt;               /* number of used maps */
+       u32 used_btf_cnt;               /* number of used BTF objects */
+       u32 id_gen;                     /* used to generate unique reg IDs */
++      bool explore_alu_limits;
+       bool allow_ptr_leaks;
+       bool allow_uninit_stack;
+       bool allow_ptr_to_map_access;
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6535,6 +6535,12 @@ static int sanitize_ptr_alu(struct bpf_v
+               alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
+               alu_state |= ptr_is_dst_reg ?
+                            BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
++
++              /* Limit pruning on unknown scalars to enable deep search for
++               * potential masking differences from other program paths.
++               */
++              if (!off_is_imm)
++                      env->explore_alu_limits = true;
+       }
+       err = update_alu_sanitation_state(aux, alu_state, alu_limit);
+@@ -9905,8 +9911,8 @@ next:
+ }
+ /* Returns true if (rold safe implies rcur safe) */
+-static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
+-                  struct bpf_id_pair *idmap)
++static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
++                  struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
+ {
+       bool equal;
+@@ -9932,6 +9938,8 @@ static bool regsafe(struct bpf_reg_state
+               return false;
+       switch (rold->type) {
+       case SCALAR_VALUE:
++              if (env->explore_alu_limits)
++                      return false;
+               if (rcur->type == SCALAR_VALUE) {
+                       if (!rold->precise && !rcur->precise)
+                               return true;
+@@ -10022,9 +10030,8 @@ static bool regsafe(struct bpf_reg_state
+       return false;
+ }
+-static bool stacksafe(struct bpf_func_state *old,
+-                    struct bpf_func_state *cur,
+-                    struct bpf_id_pair *idmap)
++static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
++                    struct bpf_func_state *cur, struct bpf_id_pair *idmap)
+ {
+       int i, spi;
+@@ -10069,9 +10076,8 @@ static bool stacksafe(struct bpf_func_st
+                       continue;
+               if (old->stack[spi].slot_type[0] != STACK_SPILL)
+                       continue;
+-              if (!regsafe(&old->stack[spi].spilled_ptr,
+-                           &cur->stack[spi].spilled_ptr,
+-                           idmap))
++              if (!regsafe(env, &old->stack[spi].spilled_ptr,
++                           &cur->stack[spi].spilled_ptr, idmap))
+                       /* when explored and current stack slot are both storing
+                        * spilled registers, check that stored pointers types
+                        * are the same as well.
+@@ -10128,10 +10134,11 @@ static bool func_states_equal(struct bpf
+       memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
+       for (i = 0; i < MAX_BPF_REG; i++)
+-              if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
++              if (!regsafe(env, &old->regs[i], &cur->regs[i],
++                           env->idmap_scratch))
+                       return false;
+-      if (!stacksafe(old, cur, env->idmap_scratch))
++      if (!stacksafe(env, old, cur, env->idmap_scratch))
+               return false;
+       if (!refsafe(old, cur))
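
To make the first patch's reasoning easier to follow, here is a plain-C sketch (not kernel code; the helper name sanitize_offset and the demo limits 7 and 3 are invented for illustration) of the branchless masking that aux->alu_limit drives at run time, and of why a limit carried over from a pruned sibling path can be too permissive:

#include <stdio.h>

/*
 * Roughly the effect of the sanitation sequence the verifier patches in
 * for variable-offset pointer arithmetic: clamp the offset to 0 whenever
 * it falls outside [0, alu_limit]. The sequence is branchless on purpose,
 * since a conditional branch could be bypassed speculatively while a pure
 * data dependency cannot. Assumes an arithmetic right shift for signed
 * types, as on the platforms the kernel targets.
 */
static long long sanitize_offset(long long off, long long alu_limit)
{
	long long ax = alu_limit;

	ax -= off;      /* goes negative iff off > alu_limit           */
	ax |= off;      /* also negative iff off itself is negative    */
	ax = -ax;
	ax >>= 63;      /* all-ones mask if off was in range, else 0   */
	return off & ax;
}

int main(void)
{
	long long off = 5;

	/* Path A justified alu_limit 7, path B would only justify 3. */
	printf("limit 7, off 5 -> %lld\n", sanitize_offset(off, 7));
	printf("limit 3, off 5 -> %lld\n", sanitize_offset(off, 3));
	return 0;
}

If state pruning lets the second path reuse the first path's result, the program keeps the wider limit (7 here) even though the second path's bounds only justify 3; the patch's env->explore_alu_limits flag closes that gap by refusing to treat scalar registers as prune-safe once such variable-offset arithmetic has been seen, forcing the deeper search described above.
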
diff --git a/queue-5.13/bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch b/queue-5.13/bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch
new file mode 100644 (file)
index 0000000..dfbc4dc
--- /dev/null
@@ -0,0 +1,149 @@
+From c9e73e3d2b1eb1ea7ff068e05007eec3bd8ef1c9 Mon Sep 17 00:00:00 2001
+From: Lorenz Bauer <lmb@cloudflare.com>
+Date: Thu, 29 Apr 2021 14:46:56 +0100
+Subject: bpf: verifier: Allocate idmap scratch in verifier env
+
+From: Lorenz Bauer <lmb@cloudflare.com>
+
+commit c9e73e3d2b1eb1ea7ff068e05007eec3bd8ef1c9 upstream.
+
+func_states_equal makes a very short-lived allocation for idmap,
+probably because it's too large to fit on the stack. However, the
+function is called quite often, leading to a lot of alloc/free
+churn. Replace the temporary allocation with dedicated scratch
+space in struct bpf_verifier_env.
+
+Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Edward Cree <ecree.xilinx@gmail.com>
+Link: https://lore.kernel.org/bpf/20210429134656.122225-4-lmb@cloudflare.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf_verifier.h |    8 +++++++
+ kernel/bpf/verifier.c        |   46 ++++++++++++++-----------------------------
+ 2 files changed, 23 insertions(+), 31 deletions(-)
+
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -215,6 +215,13 @@ struct bpf_idx_pair {
+       u32 idx;
+ };
++struct bpf_id_pair {
++      u32 old;
++      u32 cur;
++};
++
++/* Maximum number of register states that can exist at once */
++#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
+ #define MAX_CALL_FRAMES 8
+ struct bpf_verifier_state {
+       /* call stack tracking */
+@@ -418,6 +425,7 @@ struct bpf_verifier_env {
+       const struct bpf_line_info *prev_linfo;
+       struct bpf_verifier_log log;
+       struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
++      struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
+       struct {
+               int *insn_state;
+               int *insn_stack;
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -9779,13 +9779,6 @@ static bool range_within(struct bpf_reg_
+              old->s32_max_value >= cur->s32_max_value;
+ }
+-/* Maximum number of register states that can exist at once */
+-#define ID_MAP_SIZE   (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
+-struct idpair {
+-      u32 old;
+-      u32 cur;
+-};
+-
+ /* If in the old state two registers had the same id, then they need to have
+  * the same id in the new state as well.  But that id could be different from
+  * the old state, so we need to track the mapping from old to new ids.
+@@ -9796,11 +9789,11 @@ struct idpair {
+  * So we look through our idmap to see if this old id has been seen before.  If
+  * so, we require the new id to match; otherwise, we add the id pair to the map.
+  */
+-static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
++static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
+ {
+       unsigned int i;
+-      for (i = 0; i < ID_MAP_SIZE; i++) {
++      for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
+               if (!idmap[i].old) {
+                       /* Reached an empty slot; haven't seen this id before */
+                       idmap[i].old = old_id;
+@@ -9913,7 +9906,7 @@ next:
+ /* Returns true if (rold safe implies rcur safe) */
+ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
+-                  struct idpair *idmap)
++                  struct bpf_id_pair *idmap)
+ {
+       bool equal;
+@@ -10031,7 +10024,7 @@ static bool regsafe(struct bpf_reg_state
+ static bool stacksafe(struct bpf_func_state *old,
+                     struct bpf_func_state *cur,
+-                    struct idpair *idmap)
++                    struct bpf_id_pair *idmap)
+ {
+       int i, spi;
+@@ -10128,32 +10121,23 @@ static bool refsafe(struct bpf_func_stat
+  * whereas register type in current state is meaningful, it means that
+  * the current state will reach 'bpf_exit' instruction safely
+  */
+-static bool func_states_equal(struct bpf_func_state *old,
++static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
+                             struct bpf_func_state *cur)
+ {
+-      struct idpair *idmap;
+-      bool ret = false;
+       int i;
+-      idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
+-      /* If we failed to allocate the idmap, just say it's not safe */
+-      if (!idmap)
+-              return false;
+-
+-      for (i = 0; i < MAX_BPF_REG; i++) {
+-              if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
+-                      goto out_free;
+-      }
++      memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
++      for (i = 0; i < MAX_BPF_REG; i++)
++              if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
++                      return false;
+-      if (!stacksafe(old, cur, idmap))
+-              goto out_free;
++      if (!stacksafe(old, cur, env->idmap_scratch))
++              return false;
+       if (!refsafe(old, cur))
+-              goto out_free;
+-      ret = true;
+-out_free:
+-      kfree(idmap);
+-      return ret;
++              return false;
++
++      return true;
+ }
+ static bool states_equal(struct bpf_verifier_env *env,
+@@ -10180,7 +10164,7 @@ static bool states_equal(struct bpf_veri
+       for (i = 0; i <= old->curframe; i++) {
+               if (old->frame[i]->callsite != cur->frame[i]->callsite)
+                       return false;
+-              if (!func_states_equal(old->frame[i], cur->frame[i]))
++              if (!func_states_equal(env, old->frame[i], cur->frame[i]))
+                       return false;
+       }
+       return true;
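
To illustrate the allocation pattern the second patch switches to, here is a small user-space analogue (every demo_* name is invented for this sketch): a fixed-size map that used to be heap-allocated on every comparison instead lives as scratch space inside a long-lived env object and is merely zeroed per use; demo_check_ids() mirrors, in simplified form, the check_ids() logic visible in the hunk above:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_ID_MAP_SIZE 16             /* stand-in for BPF_ID_MAP_SIZE */

struct demo_id_pair {
	unsigned int old;
	unsigned int cur;
};

struct demo_env {
	/* ...other long-lived state would live here... */
	struct demo_id_pair idmap_scratch[DEMO_ID_MAP_SIZE];
};

/* Simplified check_ids(): an old id must always map to the same cur id;
 * the first empty slot (old == 0 means unused) records a new pairing.
 */
static bool demo_check_ids(unsigned int old_id, unsigned int cur_id,
			   struct demo_id_pair *idmap)
{
	for (unsigned int i = 0; i < DEMO_ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id;
	}
	return false;   /* map full: be conservative, report a mismatch */
}

/* Hot path: no per-call allocation, no ENOMEM fallback needed. */
static bool demo_states_equal(struct demo_env *env,
			      const unsigned int *old_ids,
			      const unsigned int *cur_ids, size_t n)
{
	/* The scratch space is reused, so zero it before every comparison. */
	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));

	for (size_t i = 0; i < n; i++)
		if (!demo_check_ids(old_ids[i], cur_ids[i],
				    env->idmap_scratch))
			return false;
	return true;
}

int main(void)
{
	struct demo_env *env = calloc(1, sizeof(*env)); /* allocated once */
	unsigned int old_ids[] = { 1, 2, 1 };
	unsigned int consistent[] = { 4, 5, 4 };        /* 1->4, 2->5, 1->4 */
	unsigned int conflicting[] = { 4, 5, 6 };       /* 1->4, then 1->6  */

	if (!env)
		return 1;
	printf("consistent:  %d\n", demo_states_equal(env, old_ids, consistent, 3));
	printf("conflicting: %d\n", demo_states_equal(env, old_ids, conflicting, 3));
	free(env);
	return 0;
}

Because the scratch array now sits inside struct bpf_verifier_env, its cost is paid once per verification run rather than on every func_states_equal() call, and the old "allocation failed, pretend the states differ" fallback disappears along with the allocation.
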
diff --git a/queue-5.13/series b/queue-5.13/series
index e2dbfc35c41e32ebe896a04f6b985db49da7910a..5c937f89cc7e767635c5896e04cd5477ad094776 100644 (file)
@@ -91,3 +91,5 @@ can-hi311x-fix-a-signedness-bug-in-hi3110_cmd.patch
 bpf-introduce-bpf-nospec-instruction-for-mitigating-.patch
 bpf-fix-leakage-due-to-insufficient-speculative-stor.patch
 bpf-remove-superfluous-aux-sanitation-on-subprog-rejection.patch
+bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch
+bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch