git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 19 Apr 2021 12:24:46 +0000 (14:24 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 19 Apr 2021 12:24:46 +0000 (14:24 +0200)
added patches:
arm-9071-1-uprobes-don-t-hook-on-thumb-instructions.patch
arm64-mte-ensure-tif_mte_async_fault-is-set-atomically.patch
bpf-improve-verifier-error-messages-for-users.patch
bpf-move-sanitize_val_alu-out-of-op-switch.patch
bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch

queue-5.10/arm-9071-1-uprobes-don-t-hook-on-thumb-instructions.patch [new file with mode: 0644]
queue-5.10/arm64-mte-ensure-tif_mte_async_fault-is-set-atomically.patch [new file with mode: 0644]
queue-5.10/bpf-improve-verifier-error-messages-for-users.patch [new file with mode: 0644]
queue-5.10/bpf-move-sanitize_val_alu-out-of-op-switch.patch [new file with mode: 0644]
queue-5.10/bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arm-9071-1-uprobes-don-t-hook-on-thumb-instructions.patch b/queue-5.10/arm-9071-1-uprobes-don-t-hook-on-thumb-instructions.patch
new file mode 100644 (file)
index 0000000..afd5f7c
--- /dev/null
@@ -0,0 +1,48 @@
+From d2f7eca60b29006285d57c7035539e33300e89e5 Mon Sep 17 00:00:00 2001
+From: Fredrik Strupe <fredrik@strupe.net>
+Date: Mon, 5 Apr 2021 21:52:05 +0100
+Subject: ARM: 9071/1: uprobes: Don't hook on thumb instructions
+
+From: Fredrik Strupe <fredrik@strupe.net>
+
+commit d2f7eca60b29006285d57c7035539e33300e89e5 upstream.
+
+Since uprobes is not supported for thumb, check that the thumb bit is
+not set when matching the uprobes instruction hooks.
+
+The Arm UDF instructions used for uprobes triggering
+(UPROBE_SWBP_ARM_INSN and UPROBE_SS_ARM_INSN) coincidentally share the
+same encoding as a pair of unallocated 32-bit thumb instructions (not
+UDF) when the condition code is 0b1111 (0xf). This in effect makes it
+possible to trigger the uprobes functionality from thumb, and at that
+using two unallocated instructions which are not permanently undefined.
+
+Signed-off-by: Fredrik Strupe <fredrik@strupe.net>
+Cc: stable@vger.kernel.org
+Fixes: c7edc9e326d5 ("ARM: add uprobes support")
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/probes/uprobes/core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/probes/uprobes/core.c
++++ b/arch/arm/probes/uprobes/core.c
+@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struc
+ static struct undef_hook uprobes_arm_break_hook = {
+       .instr_mask     = 0x0fffffff,
+       .instr_val      = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
+-      .cpsr_mask      = MODE_MASK,
++      .cpsr_mask      = (PSR_T_BIT | MODE_MASK),
+       .cpsr_val       = USR_MODE,
+       .fn             = uprobe_trap_handler,
+ };
+@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_bre
+ static struct undef_hook uprobes_arm_ss_hook = {
+       .instr_mask     = 0x0fffffff,
+       .instr_val      = (UPROBE_SS_ARM_INSN & 0x0fffffff),
+-      .cpsr_mask      = MODE_MASK,
++      .cpsr_mask      = (PSR_T_BIT | MODE_MASK),
+       .cpsr_val       = USR_MODE,
+       .fn             = uprobe_trap_handler,
+ };
diff --git a/queue-5.10/arm64-mte-ensure-tif_mte_async_fault-is-set-atomically.patch b/queue-5.10/arm64-mte-ensure-tif_mte_async_fault-is-set-atomically.patch
new file mode 100644 (file)
index 0000000..68ec586
--- /dev/null
@@ -0,0 +1,92 @@
+From 2decad92f4731fac9755a083fcfefa66edb7d67d Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Fri, 9 Apr 2021 18:37:10 +0100
+Subject: arm64: mte: Ensure TIF_MTE_ASYNC_FAULT is set atomically
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 2decad92f4731fac9755a083fcfefa66edb7d67d upstream.
+
+The entry from EL0 code checks the TFSRE0_EL1 register for any
+asynchronous tag check faults in user space and sets the
+TIF_MTE_ASYNC_FAULT flag. This is not done atomically, potentially
+racing with another CPU calling set_tsk_thread_flag().
+
+Replace the non-atomic ORR+STR with an STSET instruction. While STSET
+requires ARMv8.1 and an assembler that understands LSE atomics, the MTE
+feature is part of ARMv8.5 and already requires an updated assembler.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Fixes: 637ec831ea4f ("arm64: mte: Handle synchronous and asynchronous tag check faults")
+Cc: <stable@vger.kernel.org> # 5.10.x
+Reported-by: Will Deacon <will@kernel.org>
+Cc: Will Deacon <will@kernel.org>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/r/20210409173710.18582-1-catalin.marinas@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/Kconfig        |    6 +++++-
+ arch/arm64/kernel/entry.S |   10 ++++++----
+ 2 files changed, 11 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1390,10 +1390,13 @@ config ARM64_PAN
+        The feature is detected at runtime, and will remain as a 'nop'
+        instruction if the cpu does not implement the feature.
++config AS_HAS_LSE_ATOMICS
++      def_bool $(as-instr,.arch_extension lse)
++
+ config ARM64_LSE_ATOMICS
+       bool
+       default ARM64_USE_LSE_ATOMICS
+-      depends on $(as-instr,.arch_extension lse)
++      depends on AS_HAS_LSE_ATOMICS
+ config ARM64_USE_LSE_ATOMICS
+       bool "Atomic instructions"
+@@ -1667,6 +1670,7 @@ config ARM64_MTE
+       bool "Memory Tagging Extension support"
+       default y
+       depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
++      depends on AS_HAS_LSE_ATOMICS
+       select ARCH_USES_HIGH_VMA_FLAGS
+       help
+         Memory Tagging (part of the ARMv8.5 Extensions) provides
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -148,16 +148,18 @@ alternative_cb_end
+       .endm
+       /* Check for MTE asynchronous tag check faults */
+-      .macro check_mte_async_tcf, flgs, tmp
++      .macro check_mte_async_tcf, tmp, ti_flags
+ #ifdef CONFIG_ARM64_MTE
++      .arch_extension lse
+ alternative_if_not ARM64_MTE
+       b       1f
+ alternative_else_nop_endif
+       mrs_s   \tmp, SYS_TFSRE0_EL1
+       tbz     \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
+       /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
+-      orr     \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
+-      str     \flgs, [tsk, #TSK_TI_FLAGS]
++      mov     \tmp, #_TIF_MTE_ASYNC_FAULT
++      add     \ti_flags, tsk, #TSK_TI_FLAGS
++      stset   \tmp, [\ti_flags]
+       msr_s   SYS_TFSRE0_EL1, xzr
+ 1:
+ #endif
+@@ -207,7 +209,7 @@ alternative_else_nop_endif
+       disable_step_tsk x19, x20
+       /* Check for asynchronous tag check faults in user space */
+-      check_mte_async_tcf x19, x22
++      check_mte_async_tcf x22, x23
+       apply_ssbd 1, x22, x23
+       ptrauth_keys_install_kernel tsk, x20, x22, x23
diff --git a/queue-5.10/bpf-improve-verifier-error-messages-for-users.patch b/queue-5.10/bpf-improve-verifier-error-messages-for-users.patch
new file mode 100644 (file)
index 0000000..5d520f0
--- /dev/null
@@ -0,0 +1,181 @@
+From a6aaece00a57fa6f22575364b3903dfbccf5345d Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Tue, 23 Mar 2021 09:30:01 +0100
+Subject: bpf: Improve verifier error messages for users
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit a6aaece00a57fa6f22575364b3903dfbccf5345d upstream.
+
+Consolidate all error handling and provide more user-friendly error messages
+from sanitize_ptr_alu() and sanitize_val_alu().
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c |   86 ++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 63 insertions(+), 23 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5328,6 +5328,14 @@ static struct bpf_insn_aux_data *cur_aux
+       return &env->insn_aux_data[env->insn_idx];
+ }
++enum {
++      REASON_BOUNDS   = -1,
++      REASON_TYPE     = -2,
++      REASON_PATHS    = -3,
++      REASON_LIMIT    = -4,
++      REASON_STACK    = -5,
++};
++
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+                             const struct bpf_reg_state *off_reg,
+                             u32 *alu_limit, u8 opcode)
+@@ -5339,7 +5347,7 @@ static int retrieve_ptr_limit(const stru
+       if (!tnum_is_const(off_reg->var_off) &&
+           (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+-              return -EACCES;
++              return REASON_BOUNDS;
+       switch (ptr_reg->type) {
+       case PTR_TO_STACK:
+@@ -5366,11 +5374,11 @@ static int retrieve_ptr_limit(const stru
+               }
+               break;
+       default:
+-              return -EINVAL;
++              return REASON_TYPE;
+       }
+       if (ptr_limit >= max)
+-              return -ERANGE;
++              return REASON_LIMIT;
+       *alu_limit = ptr_limit;
+       return 0;
+ }
+@@ -5390,7 +5398,7 @@ static int update_alu_sanitation_state(s
+       if (aux->alu_state &&
+           (aux->alu_state != alu_state ||
+            aux->alu_limit != alu_limit))
+-              return -EACCES;
++              return REASON_PATHS;
+       /* Corresponding fixup done in fixup_bpf_calls(). */
+       aux->alu_state = alu_state;
+@@ -5463,7 +5471,46 @@ do_sim:
+       ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+       if (!ptr_is_dst_reg && ret)
+               *dst_reg = tmp;
+-      return !ret ? -EFAULT : 0;
++      return !ret ? REASON_STACK : 0;
++}
++
++static int sanitize_err(struct bpf_verifier_env *env,
++                      const struct bpf_insn *insn, int reason,
++                      const struct bpf_reg_state *off_reg,
++                      const struct bpf_reg_state *dst_reg)
++{
++      static const char *err = "pointer arithmetic with it prohibited for !root";
++      const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
++      u32 dst = insn->dst_reg, src = insn->src_reg;
++
++      switch (reason) {
++      case REASON_BOUNDS:
++              verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
++                      off_reg == dst_reg ? dst : src, err);
++              break;
++      case REASON_TYPE:
++              verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
++                      off_reg == dst_reg ? src : dst, err);
++              break;
++      case REASON_PATHS:
++              verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
++                      dst, op, err);
++              break;
++      case REASON_LIMIT:
++              verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
++                      dst, op, err);
++              break;
++      case REASON_STACK:
++              verbose(env, "R%d could not be pushed for speculative verification, %s\n",
++                      dst, err);
++              break;
++      default:
++              verbose(env, "verifier internal error: unknown reason (%d)\n",
++                      reason);
++              break;
++      }
++
++      return -EACCES;
+ }
+ /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
+@@ -5553,10 +5600,9 @@ static int adjust_ptr_min_max_vals(struc
+       switch (opcode) {
+       case BPF_ADD:
+               ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+-              if (ret < 0) {
+-                      verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
+-                      return ret;
+-              }
++              if (ret < 0)
++                      return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+               /* We can take a fixed offset as long as it doesn't overflow
+                * the s32 'off' field
+                */
+@@ -5608,10 +5654,9 @@ static int adjust_ptr_min_max_vals(struc
+               break;
+       case BPF_SUB:
+               ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+-              if (ret < 0) {
+-                      verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
+-                      return ret;
+-              }
++              if (ret < 0)
++                      return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+               if (dst_reg == off_reg) {
+                       /* scalar -= pointer.  Creates an unknown scalar */
+                       verbose(env, "R%d tried to subtract pointer from scalar\n",
+@@ -6301,9 +6346,8 @@ static int adjust_scalar_min_max_vals(st
+       s32 s32_min_val, s32_max_val;
+       u32 u32_min_val, u32_max_val;
+       u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+-      u32 dst = insn->dst_reg;
+-      int ret;
+       bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
++      int ret;
+       smin_val = src_reg.smin_value;
+       smax_val = src_reg.smax_value;
+@@ -6362,20 +6406,16 @@ static int adjust_scalar_min_max_vals(st
+       switch (opcode) {
+       case BPF_ADD:
+               ret = sanitize_val_alu(env, insn);
+-              if (ret < 0) {
+-                      verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+-                      return ret;
+-              }
++              if (ret < 0)
++                      return sanitize_err(env, insn, ret, NULL, NULL);
+               scalar32_min_max_add(dst_reg, &src_reg);
+               scalar_min_max_add(dst_reg, &src_reg);
+               dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
+               break;
+       case BPF_SUB:
+               ret = sanitize_val_alu(env, insn);
+-              if (ret < 0) {
+-                      verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+-                      return ret;
+-              }
++              if (ret < 0)
++                      return sanitize_err(env, insn, ret, NULL, NULL);
+               scalar32_min_max_sub(dst_reg, &src_reg);
+               scalar_min_max_sub(dst_reg, &src_reg);
+               dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
diff --git a/queue-5.10/bpf-move-sanitize_val_alu-out-of-op-switch.patch b/queue-5.10/bpf-move-sanitize_val_alu-out-of-op-switch.patch
new file mode 100644 (file)
index 0000000..528eab6
--- /dev/null
@@ -0,0 +1,66 @@
+From f528819334881fd622fdadeddb3f7edaed8b7c9b Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Wed, 24 Mar 2021 11:25:39 +0100
+Subject: bpf: Move sanitize_val_alu out of op switch
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit f528819334881fd622fdadeddb3f7edaed8b7c9b upstream.
+
+Add a small sanitize_needed() helper function and move sanitize_val_alu()
+out of the main opcode switch. In upcoming work, we'll move sanitize_ptr_alu()
+as well out of its opcode switch so this helps to streamline both.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5417,6 +5417,11 @@ static int sanitize_val_alu(struct bpf_v
+       return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+ }
++static bool sanitize_needed(u8 opcode)
++{
++      return opcode == BPF_ADD || opcode == BPF_SUB;
++}
++
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+                           struct bpf_insn *insn,
+                           const struct bpf_reg_state *ptr_reg,
+@@ -6389,6 +6394,12 @@ static int adjust_scalar_min_max_vals(st
+               return 0;
+       }
++      if (sanitize_needed(opcode)) {
++              ret = sanitize_val_alu(env, insn);
++              if (ret < 0)
++                      return sanitize_err(env, insn, ret, NULL, NULL);
++      }
++
+       /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
+        * There are two classes of instructions: The first class we track both
+        * alu32 and alu64 sign/unsigned bounds independently this provides the
+@@ -6405,17 +6416,11 @@ static int adjust_scalar_min_max_vals(st
+        */
+       switch (opcode) {
+       case BPF_ADD:
+-              ret = sanitize_val_alu(env, insn);
+-              if (ret < 0)
+-                      return sanitize_err(env, insn, ret, NULL, NULL);
+               scalar32_min_max_add(dst_reg, &src_reg);
+               scalar_min_max_add(dst_reg, &src_reg);
+               dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
+               break;
+       case BPF_SUB:
+-              ret = sanitize_val_alu(env, insn);
+-              if (ret < 0)
+-                      return sanitize_err(env, insn, ret, NULL, NULL);
+               scalar32_min_max_sub(dst_reg, &src_reg);
+               scalar_min_max_sub(dst_reg, &src_reg);
+               dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
diff --git a/queue-5.10/bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch b/queue-5.10/bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch
new file mode 100644 (file)
index 0000000..9bedd3a
--- /dev/null
@@ -0,0 +1,71 @@
+From b658bbb844e28f1862867f37e8ca11a8e2aa94a3 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Tue, 23 Mar 2021 09:04:10 +0100
+Subject: bpf: Rework ptr_limit into alu_limit and add common error path
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit b658bbb844e28f1862867f37e8ca11a8e2aa94a3 upstream.
+
+Small refactor with no semantic changes in order to consolidate the max
+ptr_limit boundary check.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c |   21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5330,12 +5330,12 @@ static struct bpf_insn_aux_data *cur_aux
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+                             const struct bpf_reg_state *off_reg,
+-                            u32 *ptr_limit, u8 opcode)
++                            u32 *alu_limit, u8 opcode)
+ {
+       bool off_is_neg = off_reg->smin_value < 0;
+       bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+                           (opcode == BPF_SUB && !off_is_neg);
+-      u32 off, max;
++      u32 off, max = 0, ptr_limit = 0;
+       if (!tnum_is_const(off_reg->var_off) &&
+           (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+@@ -5352,22 +5352,27 @@ static int retrieve_ptr_limit(const stru
+                */
+               off = ptr_reg->off + ptr_reg->var_off.value;
+               if (mask_to_left)
+-                      *ptr_limit = MAX_BPF_STACK + off;
++                      ptr_limit = MAX_BPF_STACK + off;
+               else
+-                      *ptr_limit = -off - 1;
+-              return *ptr_limit >= max ? -ERANGE : 0;
++                      ptr_limit = -off - 1;
++              break;
+       case PTR_TO_MAP_VALUE:
+               max = ptr_reg->map_ptr->value_size;
+               if (mask_to_left) {
+-                      *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
++                      ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+               } else {
+                       off = ptr_reg->smin_value + ptr_reg->off;
+-                      *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
++                      ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+               }
+-              return *ptr_limit >= max ? -ERANGE : 0;
++              break;
+       default:
+               return -EINVAL;
+       }
++
++      if (ptr_limit >= max)
++              return -ERANGE;
++      *alu_limit = ptr_limit;
++      return 0;
+ }
+ static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
index 4a6b1ae7c2e35810fcc05a029bacc87821a4e2f2..ec5519323869e30b2849d61c7e6b997653659ff9 100644 (file)
@@ -96,3 +96,8 @@ r8169-tweak-max-read-request-size-for-newer-chips-al.patch
 r8169-don-t-advertise-pause-in-jumbo-mode.patch
 bpf-ensure-off_reg-has-no-mixed-signed-bounds-for-al.patch
 bpf-move-off_reg-into-sanitize_ptr_alu.patch
+arm-9071-1-uprobes-don-t-hook-on-thumb-instructions.patch
+arm64-mte-ensure-tif_mte_async_fault-is-set-atomically.patch
+bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch
+bpf-improve-verifier-error-messages-for-users.patch
+bpf-move-sanitize_val_alu-out-of-op-switch.patch