--- /dev/null
+From d2f7eca60b29006285d57c7035539e33300e89e5 Mon Sep 17 00:00:00 2001
+From: Fredrik Strupe <fredrik@strupe.net>
+Date: Mon, 5 Apr 2021 21:52:05 +0100
+Subject: ARM: 9071/1: uprobes: Don't hook on thumb instructions
+
+From: Fredrik Strupe <fredrik@strupe.net>
+
+commit d2f7eca60b29006285d57c7035539e33300e89e5 upstream.
+
+Since uprobes is not supported for thumb, check that the thumb bit is
+not set when matching the uprobes instruction hooks.
+
+The Arm UDF instructions used for uprobes triggering
+(UPROBE_SWBP_ARM_INSN and UPROBE_SS_ARM_INSN) coincidentally share the
+same encoding as a pair of unallocated 32-bit thumb instructions (not
+UDF) when the condition code is 0b1111 (0xf). This in effect makes it
+possible to trigger the uprobes functionality from thumb, and, at that, by
+using two unallocated instructions which are not permanently undefined.
+
+Signed-off-by: Fredrik Strupe <fredrik@strupe.net>
+Cc: stable@vger.kernel.org
+Fixes: c7edc9e326d5 ("ARM: add uprobes support")
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/probes/uprobes/core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/probes/uprobes/core.c
++++ b/arch/arm/probes/uprobes/core.c
+@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struc
+ static struct undef_hook uprobes_arm_break_hook = {
+ .instr_mask = 0x0fffffff,
+ .instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
+- .cpsr_mask = MODE_MASK,
++ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
+ .cpsr_val = USR_MODE,
+ .fn = uprobe_trap_handler,
+ };
+@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_bre
+ static struct undef_hook uprobes_arm_ss_hook = {
+ .instr_mask = 0x0fffffff,
+ .instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff),
+- .cpsr_mask = MODE_MASK,
++ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
+ .cpsr_val = USR_MODE,
+ .fn = uprobe_trap_handler,
+ };
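
[ Note on the patch above: the undef_hook matching in arch/arm/kernel/traps.c
  compares both the trapping instruction and the CPSR against each registered
  hook, roughly as in this simplified sketch (illustrative, not the exact
  kernel code):

      /* A hook fires only if both the instruction and the CPSR match. */
      static bool hook_matches(const struct undef_hook *hook,
                               unsigned int instr, unsigned long cpsr)
      {
              return (instr & hook->instr_mask) == hook->instr_val &&
                     (cpsr & hook->cpsr_mask) == hook->cpsr_val;
      }

  With .cpsr_mask = MODE_MASK alone, a Thumb-mode CPSR (PSR_T_BIT set, mode
  bits equal to USR_MODE) still matches .cpsr_val = USR_MODE. Folding
  PSR_T_BIT into the mask makes the comparison fail whenever the Thumb bit
  is set, so the uprobes hooks now fire for ARM-state user code only. ]
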
--- /dev/null
+From a6aaece00a57fa6f22575364b3903dfbccf5345d Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Tue, 23 Mar 2021 09:30:01 +0100
+Subject: bpf: Improve verifier error messages for users
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit a6aaece00a57fa6f22575364b3903dfbccf5345d upstream.
+
+Consolidate all error handling and provide more user-friendly error messages
+from sanitize_ptr_alu() and sanitize_val_alu().
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 86 ++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 63 insertions(+), 23 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5384,6 +5384,14 @@ static struct bpf_insn_aux_data *cur_aux
+ return &env->insn_aux_data[env->insn_idx];
+ }
+
++enum {
++ REASON_BOUNDS = -1,
++ REASON_TYPE = -2,
++ REASON_PATHS = -3,
++ REASON_LIMIT = -4,
++ REASON_STACK = -5,
++};
++
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
+ u32 *alu_limit, u8 opcode)
+@@ -5395,7 +5403,7 @@ static int retrieve_ptr_limit(const stru
+
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+- return -EACCES;
++ return REASON_BOUNDS;
+
+ switch (ptr_reg->type) {
+ case PTR_TO_STACK:
+@@ -5422,11 +5430,11 @@ static int retrieve_ptr_limit(const stru
+ }
+ break;
+ default:
+- return -EINVAL;
++ return REASON_TYPE;
+ }
+
+ if (ptr_limit >= max)
+- return -ERANGE;
++ return REASON_LIMIT;
+ *alu_limit = ptr_limit;
+ return 0;
+ }
+@@ -5446,7 +5454,7 @@ static int update_alu_sanitation_state(s
+ if (aux->alu_state &&
+ (aux->alu_state != alu_state ||
+ aux->alu_limit != alu_limit))
+- return -EACCES;
++ return REASON_PATHS;
+
+ /* Corresponding fixup done in fixup_bpf_calls(). */
+ aux->alu_state = alu_state;
+@@ -5519,7 +5527,46 @@ do_sim:
+ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+ if (!ptr_is_dst_reg && ret)
+ *dst_reg = tmp;
+- return !ret ? -EFAULT : 0;
++ return !ret ? REASON_STACK : 0;
++}
++
++static int sanitize_err(struct bpf_verifier_env *env,
++ const struct bpf_insn *insn, int reason,
++ const struct bpf_reg_state *off_reg,
++ const struct bpf_reg_state *dst_reg)
++{
++ static const char *err = "pointer arithmetic with it prohibited for !root";
++ const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
++ u32 dst = insn->dst_reg, src = insn->src_reg;
++
++ switch (reason) {
++ case REASON_BOUNDS:
++ verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
++ off_reg == dst_reg ? dst : src, err);
++ break;
++ case REASON_TYPE:
++ verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
++ off_reg == dst_reg ? src : dst, err);
++ break;
++ case REASON_PATHS:
++ verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
++ dst, op, err);
++ break;
++ case REASON_LIMIT:
++ verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
++ dst, op, err);
++ break;
++ case REASON_STACK:
++ verbose(env, "R%d could not be pushed for speculative verification, %s\n",
++ dst, err);
++ break;
++ default:
++ verbose(env, "verifier internal error: unknown reason (%d)\n",
++ reason);
++ break;
++ }
++
++ return -EACCES;
+ }
+
+ /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
+@@ -5609,10 +5656,9 @@ static int adjust_ptr_min_max_vals(struc
+ switch (opcode) {
+ case BPF_ADD:
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+- if (ret < 0) {
+- verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
+- return ret;
+- }
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+ /* We can take a fixed offset as long as it doesn't overflow
+ * the s32 'off' field
+ */
+@@ -5664,10 +5710,9 @@ static int adjust_ptr_min_max_vals(struc
+ break;
+ case BPF_SUB:
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+- if (ret < 0) {
+- verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
+- return ret;
+- }
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+ if (dst_reg == off_reg) {
+ /* scalar -= pointer. Creates an unknown scalar */
+ verbose(env, "R%d tried to subtract pointer from scalar\n",
+@@ -6357,9 +6402,8 @@ static int adjust_scalar_min_max_vals(st
+ s32 s32_min_val, s32_max_val;
+ u32 u32_min_val, u32_max_val;
+ u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+- u32 dst = insn->dst_reg;
+- int ret;
+ bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
++ int ret;
+
+ smin_val = src_reg.smin_value;
+ smax_val = src_reg.smax_value;
+@@ -6418,20 +6462,16 @@ static int adjust_scalar_min_max_vals(st
+ switch (opcode) {
+ case BPF_ADD:
+ ret = sanitize_val_alu(env, insn);
+- if (ret < 0) {
+- verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+- return ret;
+- }
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, NULL, NULL);
+ scalar32_min_max_add(dst_reg, &src_reg);
+ scalar_min_max_add(dst_reg, &src_reg);
+ dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
+ break;
+ case BPF_SUB:
+ ret = sanitize_val_alu(env, insn);
+- if (ret < 0) {
+- verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+- return ret;
+- }
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, NULL, NULL);
+ scalar32_min_max_sub(dst_reg, &src_reg);
+ scalar_min_max_sub(dst_reg, &src_reg);
+ dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
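
[ Note on the patch above: every reason code still resolves to -EACCES for
  the caller; only the verbose() diagnostics change. For instance, hitting
  the REASON_LIMIT case on a BPF_ADD would now print something along the
  lines of (register number hypothetical):

      R2 tried to add beyond pointer bounds, pointer arithmetic with it prohibited for !root

  instead of the previous catch-all "tried to add from different maps,
  paths, or prohibited types" message. ]
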
--- /dev/null
+From f528819334881fd622fdadeddb3f7edaed8b7c9b Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Wed, 24 Mar 2021 11:25:39 +0100
+Subject: bpf: Move sanitize_val_alu out of op switch
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit f528819334881fd622fdadeddb3f7edaed8b7c9b upstream.
+
+Add a small sanitize_needed() helper function and move sanitize_val_alu()
+out of the main opcode switch. In upcoming work, we'll move sanitize_ptr_alu()
+as well out of its opcode switch so this helps to streamline both.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5473,6 +5473,11 @@ static int sanitize_val_alu(struct bpf_v
+ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+ }
+
++static bool sanitize_needed(u8 opcode)
++{
++ return opcode == BPF_ADD || opcode == BPF_SUB;
++}
++
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn,
+ const struct bpf_reg_state *ptr_reg,
+@@ -6445,6 +6450,12 @@ static int adjust_scalar_min_max_vals(st
+ return 0;
+ }
+
++ if (sanitize_needed(opcode)) {
++ ret = sanitize_val_alu(env, insn);
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, NULL, NULL);
++ }
++
+ /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
+ * There are two classes of instructions: The first class we track both
+ * alu32 and alu64 sign/unsigned bounds independently this provides the
+@@ -6461,17 +6472,11 @@ static int adjust_scalar_min_max_vals(st
+ */
+ switch (opcode) {
+ case BPF_ADD:
+- ret = sanitize_val_alu(env, insn);
+- if (ret < 0)
+- return sanitize_err(env, insn, ret, NULL, NULL);
+ scalar32_min_max_add(dst_reg, &src_reg);
+ scalar_min_max_add(dst_reg, &src_reg);
+ dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
+ break;
+ case BPF_SUB:
+- ret = sanitize_val_alu(env, insn);
+- if (ret < 0)
+- return sanitize_err(env, insn, ret, NULL, NULL);
+ scalar32_min_max_sub(dst_reg, &src_reg);
+ scalar_min_max_sub(dst_reg, &src_reg);
+ dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
--- /dev/null
+From b658bbb844e28f1862867f37e8ca11a8e2aa94a3 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Tue, 23 Mar 2021 09:04:10 +0100
+Subject: bpf: Rework ptr_limit into alu_limit and add common error path
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit b658bbb844e28f1862867f37e8ca11a8e2aa94a3 upstream.
+
+Small refactor with no semantic changes in order to consolidate the max
+ptr_limit boundary check.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5386,12 +5386,12 @@ static struct bpf_insn_aux_data *cur_aux
+
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
+- u32 *ptr_limit, u8 opcode)
++ u32 *alu_limit, u8 opcode)
+ {
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+- u32 off, max;
++ u32 off, max = 0, ptr_limit = 0;
+
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+@@ -5408,22 +5408,27 @@ static int retrieve_ptr_limit(const stru
+ */
+ off = ptr_reg->off + ptr_reg->var_off.value;
+ if (mask_to_left)
+- *ptr_limit = MAX_BPF_STACK + off;
++ ptr_limit = MAX_BPF_STACK + off;
+ else
+- *ptr_limit = -off - 1;
+- return *ptr_limit >= max ? -ERANGE : 0;
++ ptr_limit = -off - 1;
++ break;
+ case PTR_TO_MAP_VALUE:
+ max = ptr_reg->map_ptr->value_size;
+ if (mask_to_left) {
+- *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
++ ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+ } else {
+ off = ptr_reg->smin_value + ptr_reg->off;
+- *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
++ ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+ }
+- return *ptr_limit >= max ? -ERANGE : 0;
++ break;
+ default:
+ return -EINVAL;
+ }
++
++ if (ptr_limit >= max)
++ return -ERANGE;
++ *alu_limit = ptr_limit;
++ return 0;
+ }
+
+ static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
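
[ Note on the patch above: both the PTR_TO_STACK and PTR_TO_MAP_VALUE cases
  now fall through to the single "ptr_limit >= max" check; the computed
  values are unchanged. As a worked example with hypothetical values, for a
  stack pointer at fp-64 (ptr_reg->off = -64, constant var_off of 0) being
  masked to the left (mask_to_left == true):

      max       = MAX_BPF_STACK + 1;      /* 513, MAX_BPF_STACK being 512 */
      ptr_limit = MAX_BPF_STACK + (-64);  /* 448 bytes of stack to the left */
      /* 448 < 513, so *alu_limit is set to 448 and 0 is returned */

  i.e. alu_limit is the largest distance the pointer may validly be moved in
  the masked direction. ]
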
--- /dev/null
+From 7fedb63a8307dda0ec3b8969a3b233a1dd7ea8e0 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Wed, 24 Mar 2021 10:38:26 +0100
+Subject: bpf: Tighten speculative pointer arithmetic mask
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 7fedb63a8307dda0ec3b8969a3b233a1dd7ea8e0 upstream.
+
+This work tightens the offset mask we use for unprivileged pointer arithmetic
+in order to mitigate a corner case reported by Piotr and Benedict where in
+the speculative domain it is possible to advance, for example, the map value
+pointer by up to value_size-1 out-of-bounds in order to leak kernel memory
+via side-channel to user space.
+
+Before this change, the ptr_limit computed by the retrieve_ptr_limit() helper
+represents the largest valid distance when moving the pointer to the right or
+left, which is then fed as aux->alu_limit to generate masking instructions
+against the offset register. After the change, the derived aux->alu_limit
+represents the largest potential value of the offset register which we mask
+against, which is just a narrower subset of the former limit.
+
+For minimal complexity, we call sanitize_ptr_alu() from 2 observation points
+in adjust_ptr_min_max_vals(), that is, before and after the simulated alu
+operation. In the first step, we retrieve the alu_state and alu_limit before
+the operation, and also branch off a verifier path and push it to the
+verification stack as we did before; this checks the dst_reg under truncation,
+in other words, when the speculative domain would attempt to move the pointer
+out-of-bounds.
+
+In the second step, we retrieve the new alu_limit and calculate the absolute
+distance between both. Moreover, we commit the alu_state and final alu_limit
+via update_alu_sanitation_state() to the env's instruction aux data, and bail
+out from there if there is a mismatch due to coming from different verification
+paths with different states.
+
+Reported-by: Piotr Krysiuk <piotras@gmail.com>
+Reported-by: Benedict Schlueter <benedict.schlueter@rub.de>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Tested-by: Benedict Schlueter <benedict.schlueter@rub.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 73 ++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 44 insertions(+), 29 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5399,7 +5399,7 @@ static int retrieve_ptr_limit(const stru
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+- u32 off, max = 0, ptr_limit = 0;
++ u32 max = 0, ptr_limit = 0;
+
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+@@ -5408,26 +5408,18 @@ static int retrieve_ptr_limit(const stru
+ switch (ptr_reg->type) {
+ case PTR_TO_STACK:
+ /* Offset 0 is out-of-bounds, but acceptable start for the
+- * left direction, see BPF_REG_FP.
++ * left direction, see BPF_REG_FP. Also, unknown scalar
++ * offset where we would need to deal with min/max bounds is
++ * currently prohibited for unprivileged.
+ */
+ max = MAX_BPF_STACK + mask_to_left;
+- /* Indirect variable offset stack access is prohibited in
+- * unprivileged mode so it's not handled here.
+- */
+- off = ptr_reg->off + ptr_reg->var_off.value;
+- if (mask_to_left)
+- ptr_limit = MAX_BPF_STACK + off;
+- else
+- ptr_limit = -off - 1;
++ ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
+ break;
+ case PTR_TO_MAP_VALUE:
+ max = ptr_reg->map_ptr->value_size;
+- if (mask_to_left) {
+- ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+- } else {
+- off = ptr_reg->smin_value + ptr_reg->off;
+- ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+- }
++ ptr_limit = (mask_to_left ?
++ ptr_reg->smin_value :
++ ptr_reg->umax_value) + ptr_reg->off;
+ break;
+ default:
+ return REASON_TYPE;
+@@ -5482,10 +5474,12 @@ static int sanitize_ptr_alu(struct bpf_v
+ struct bpf_insn *insn,
+ const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
+- struct bpf_reg_state *dst_reg)
++ struct bpf_reg_state *dst_reg,
++ struct bpf_insn_aux_data *tmp_aux,
++ const bool commit_window)
+ {
++ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ struct bpf_verifier_state *vstate = env->cur_state;
+- struct bpf_insn_aux_data *aux = cur_aux(env);
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ u8 opcode = BPF_OP(insn->code);
+@@ -5504,18 +5498,33 @@ static int sanitize_ptr_alu(struct bpf_v
+ if (vstate->speculative)
+ goto do_sim;
+
+- alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+- alu_state |= ptr_is_dst_reg ?
+- BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+-
+ err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+ if (err < 0)
+ return err;
+
++ if (commit_window) {
++ /* In commit phase we narrow the masking window based on
++ * the observed pointer move after the simulated operation.
++ */
++ alu_state = tmp_aux->alu_state;
++ alu_limit = abs(tmp_aux->alu_limit - alu_limit);
++ } else {
++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
++ alu_state |= ptr_is_dst_reg ?
++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
++ }
++
+ err = update_alu_sanitation_state(aux, alu_state, alu_limit);
+ if (err < 0)
+ return err;
+ do_sim:
++ /* If we're in commit phase, we're done here given we already
++ * pushed the truncated dst_reg into the speculative verification
++ * stack.
++ */
++ if (commit_window)
++ return 0;
++
+ /* Simulate and find potential out-of-bounds access under
+ * speculative execution from truncation as a result of
+ * masking when off was not within expected range. If off
+@@ -5592,6 +5601,7 @@ static int adjust_ptr_min_max_vals(struc
+ smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+ u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+ umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
++ struct bpf_insn_aux_data tmp_aux = {};
+ u8 opcode = BPF_OP(insn->code);
+ u32 dst = insn->dst_reg;
+ int ret;
+@@ -5658,12 +5668,15 @@ static int adjust_ptr_min_max_vals(struc
+ /* pointer types do not carry 32-bit bounds at the moment. */
+ __mark_reg32_unbounded(dst_reg);
+
+- switch (opcode) {
+- case BPF_ADD:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
++ if (sanitize_needed(opcode)) {
++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
++ &tmp_aux, false);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++ }
+
++ switch (opcode) {
++ case BPF_ADD:
+ /* We can take a fixed offset as long as it doesn't overflow
+ * the s32 'off' field
+ */
+@@ -5714,10 +5727,6 @@ static int adjust_ptr_min_max_vals(struc
+ }
+ break;
+ case BPF_SUB:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+- if (ret < 0)
+- return sanitize_err(env, insn, ret, off_reg, dst_reg);
+-
+ if (dst_reg == off_reg) {
+ /* scalar -= pointer. Creates an unknown scalar */
+ verbose(env, "R%d tried to subtract pointer from scalar\n",
+@@ -5800,6 +5809,12 @@ static int adjust_ptr_min_max_vals(struc
+
+ if (sanitize_check_bounds(env, insn, dst_reg) < 0)
+ return -EACCES;
++ if (sanitize_needed(opcode)) {
++ ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
++ &tmp_aux, true);
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++ }
+
+ return 0;
+ }
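
[ Note on the patch above, as a rough worked example with hypothetical
  values: take a map value pointer R1 (value_size = 16, off = 0, constant,
  so umax_value = 0) and an offset register R2 verified to lie in [0, 7],
  for R1 += R2:

      /* First pass, before the simulated add (ptr_reg == R1): */
      ptr_limit = umax_value + ptr_reg->off = 0;    /* saved in tmp_aux */
      /* Commit pass, after the simulated add (ptr_reg == dst_reg): */
      ptr_limit = umax_value + off = 7;
      alu_limit = abs(0 - 7) = 7;                   /* max value of R2 */

  The old code derived the limit from the distance to the end of the map
  value (value_size - off - 1 = 15 here), so the runtime masking could still
  let the pointer advance speculatively past R2's verified bounds; deriving
  it from the observed pointer move narrows the mask to 7. ]
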
r8169-don-t-advertise-pause-in-jumbo-mode.patch
bpf-ensure-off_reg-has-no-mixed-signed-bounds-for-al.patch
bpf-move-off_reg-into-sanitize_ptr_alu.patch
+arm-9071-1-uprobes-don-t-hook-on-thumb-instructions.patch
+bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch
+bpf-improve-verifier-error-messages-for-users.patch
+bpf-move-sanitize_val_alu-out-of-op-switch.patch
+bpf-tighten-speculative-pointer-arithmetic-mask.patch