5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 8 May 2023 06:57:37 +0000 (08:57 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 8 May 2023 06:57:37 +0000 (08:57 +0200)
added patches:
arm64-always-load-shadow-stack-pointer-directly-from-the-task-struct.patch
arm64-stash-shadow-stack-pointer-in-the-task-struct-on-interrupt.patch

queue-5.10/arm64-always-load-shadow-stack-pointer-directly-from-the-task-struct.patch [new file with mode: 0644]
queue-5.10/arm64-stash-shadow-stack-pointer-in-the-task-struct-on-interrupt.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arm64-always-load-shadow-stack-pointer-directly-from-the-task-struct.patch b/queue-5.10/arm64-always-load-shadow-stack-pointer-directly-from-the-task-struct.patch
new file mode 100644 (file)
index 0000000..ca4c9ef
--- /dev/null
+++ b/queue-5.10/arm64-always-load-shadow-stack-pointer-directly-from-the-task-struct.patch
@@ -0,0 +1,83 @@
+From 2198d07c509f1db4a1185d1f65aaada794c6ea59 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Mon, 9 Jan 2023 18:47:59 +0100
+Subject: arm64: Always load shadow stack pointer directly from the task struct
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 2198d07c509f1db4a1185d1f65aaada794c6ea59 upstream.
+
+All occurrences of the scs_load macro load the value of the shadow call
+stack pointer from the task which is current at that point. So instead
+of taking a task struct register argument in the scs_load macro to
+specify the task struct to load from, let's always reference the current
+task directly. This should make it much harder to exploit any
+instruction sequences reloading the shadow call stack pointer register
+from memory.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/scs.h |    7 ++++---
+ arch/arm64/kernel/entry.S    |    4 ++--
+ arch/arm64/kernel/head.S     |    2 +-
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/scs.h
++++ b/arch/arm64/include/asm/scs.h
+@@ -9,15 +9,16 @@
+ #ifdef CONFIG_SHADOW_CALL_STACK
+       scs_sp  .req    x18
+-      .macro scs_load tsk, tmp
+-      ldr     scs_sp, [\tsk, #TSK_TI_SCS_SP]
++      .macro scs_load_current
++      get_current_task scs_sp
++      ldr     scs_sp, [scs_sp, #TSK_TI_SCS_SP]
+       .endm
+       .macro scs_save tsk, tmp
+       str     scs_sp, [\tsk, #TSK_TI_SCS_SP]
+       .endm
+ #else
+-      .macro scs_load tsk, tmp
++      .macro scs_load_current
+       .endm
+       .macro scs_save tsk, tmp
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -221,7 +221,7 @@ alternative_else_nop_endif
+       ptrauth_keys_install_kernel tsk, x20, x22, x23
+-      scs_load tsk, x20
++      scs_load_current
+       .else
+       add     x21, sp, #S_FRAME_SIZE
+       get_current_task tsk
+@@ -1025,7 +1025,7 @@ SYM_FUNC_START(cpu_switch_to)
+       msr     sp_el0, x1
+       ptrauth_keys_install_kernel x1, x8, x9, x10
+       scs_save x0, x8
+-      scs_load x1, x8
++      scs_load_current
+       ret
+ SYM_FUNC_END(cpu_switch_to)
+ NOKPROBE(cpu_switch_to)
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -747,7 +747,7 @@ SYM_FUNC_START_LOCAL(__secondary_switche
+       ldr     x2, [x0, #CPU_BOOT_TASK]
+       cbz     x2, __secondary_too_slow
+       msr     sp_el0, x2
+-      scs_load x2, x3
++      scs_load_current
+       mov     x29, #0
+       mov     x30, #0
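For reference, a sketch (not part of the patch) of what the new scs_load_current
macro expands to, assuming the usual arm64 definition of get_current_task, which
reads the current task_struct pointer from sp_el0 while running in the kernel:

	mrs	scs_sp, sp_el0				// get_current_task: current task_struct
	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]	// reload x18 from the task struct

Since both instructions derive the address from sp_el0 rather than from a
general-purpose register holding a task pointer, there is no spare register
value for a reload gadget to substitute.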
diff --git a/queue-5.10/arm64-stash-shadow-stack-pointer-in-the-task-struct-on-interrupt.patch b/queue-5.10/arm64-stash-shadow-stack-pointer-in-the-task-struct-on-interrupt.patch
new file mode 100644 (file)
index 0000000..eaaf63e
--- /dev/null
+++ b/queue-5.10/arm64-stash-shadow-stack-pointer-in-the-task-struct-on-interrupt.patch
@@ -0,0 +1,61 @@
+From 59b37fe52f49955791a460752c37145f1afdcad1 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Mon, 9 Jan 2023 18:48:00 +0100
+Subject: arm64: Stash shadow stack pointer in the task struct on interrupt
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 59b37fe52f49955791a460752c37145f1afdcad1 upstream.
+
+Instead of reloading the shadow call stack pointer from the ordinary
+stack, which may be vulnerable to the kind of gadget based attacks
+shadow call stacks were designed to prevent, let's store a task's shadow
+call stack pointer in the task struct when switching to the shadow IRQ
+stack.
+
+Given that currently, the task_struct::scs_sp field is only used to
+preserve the shadow call stack pointer while a task is scheduled out or
+running in user space, reusing this field to preserve and restore it
+while running off the IRQ stack must be safe, as those occurrences are
+guaranteed to never overlap. (The stack switching logic only switches
+stacks when running from the task stack, and so the value being saved
+here always corresponds to the task mode shadow stack)
+
+While at it, fold a mov/add/mov sequence into a single add.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/r/20230109174800.3286265-3-ardb@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ardb: v5.10 backport, which doesn't have call_on_irq_stack() yet]
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S |    8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -431,9 +431,7 @@ SYM_CODE_END(__swpan_exit_el0)
+       .macro  irq_stack_entry
+       mov     x19, sp                 // preserve the original sp
+-#ifdef CONFIG_SHADOW_CALL_STACK
+-      mov     x24, scs_sp             // preserve the original shadow stack
+-#endif
++      scs_save tsk                    // preserve the original shadow stack
+       /*
+        * Compare sp with the base of the task stack.
+@@ -467,9 +465,7 @@ SYM_CODE_END(__swpan_exit_el0)
+        */
+       .macro  irq_stack_exit
+       mov     sp, x19
+-#ifdef CONFIG_SHADOW_CALL_STACK
+-      mov     scs_sp, x24
+-#endif
++      scs_load_current
+       .endm
+ /* GPRs used by entry code */
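Taken together with the previous patch, the interrupt path now round-trips the
shadow call stack pointer through the task struct instead of through a
callee-saved GPR. A sketch of the resulting flow (assuming the scs_save and
scs_load_current macros shown above; tsk is entry.S's alias for the current
task register):

	// irq_stack_entry
	mov	x19, sp			// preserve the ordinary stack pointer
	scs_save tsk			// str scs_sp, [tsk, #TSK_TI_SCS_SP]
	...				// switch sp (and scs_sp) to the IRQ stacks

	// irq_stack_exit
	mov	sp, x19			// back to the task stack
	scs_load_current		// reload scs_sp from the current task struct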
diff --git a/queue-5.10/series b/queue-5.10/series
index c37db769a1146d79cee7ecd0ab33573d92271a8a..059e9447810c0e1badcefc7e6c60a461ef5d862a 100644 (file)
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -287,3 +287,5 @@ dm-flakey-fix-a-crash-with-invalid-table-line.patch
 dm-ioctl-fix-nested-locking-in-table_clear-to-remove-deadlock-concern.patch
 perf-auxtrace-fix-address-filter-entire-kernel-size.patch
 perf-intel-pt-fix-cyc-timestamps-after-standalone-cbr.patch
+arm64-always-load-shadow-stack-pointer-directly-from-the-task-struct.patch
+arm64-stash-shadow-stack-pointer-in-the-task-struct-on-interrupt.patch