--- /dev/null
+From 2198d07c509f1db4a1185d1f65aaada794c6ea59 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Mon, 9 Jan 2023 18:47:59 +0100
+Subject: arm64: Always load shadow stack pointer directly from the task struct
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 2198d07c509f1db4a1185d1f65aaada794c6ea59 upstream.
+
+All occurrences of the scs_load macro load the value of the shadow call
+stack pointer from the task which is current at that point. So instead
+of taking a task struct register argument in the scs_load macro to
+specify the task struct to load from, let's always reference the current
+task directly. This should make it much harder to exploit any
+instruction sequences reloading the shadow call stack pointer register
+from memory.
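+
+For reference, here is a minimal sketch of what the new sequence boils
+down to. It assumes get_current_task is the usual SP_EL0 read from
+arch/arm64/include/asm/assembler.h, where the arm64 kernel keeps the
+current task_struct pointer while running in kernel mode:
+
+	.macro	get_current_task, rd
+	mrs	\rd, sp_el0		// SP_EL0 holds 'current' at EL1
+	.endm
+
+	scs_load_current		// expands to roughly:
+					//   mrs scs_sp, sp_el0
+					//   ldr scs_sp, [scs_sp, #TSK_TI_SCS_SP]
+
+Since kernel entry code points SP_EL0 at the current task, any reload
+of scs_sp now goes through the task struct of 'current' rather than a
+caller-supplied register.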
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/scs.h | 7 ++++---
+ arch/arm64/kernel/entry.S | 4 ++--
+ arch/arm64/kernel/head.S | 2 +-
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/scs.h
++++ b/arch/arm64/include/asm/scs.h
+@@ -9,15 +9,16 @@
+ #ifdef CONFIG_SHADOW_CALL_STACK
+ scs_sp .req x18
+
+- .macro scs_load tsk, tmp
+- ldr scs_sp, [\tsk, #TSK_TI_SCS_SP]
++ .macro scs_load_current
++ get_current_task scs_sp
++ ldr scs_sp, [scs_sp, #TSK_TI_SCS_SP]
+ .endm
+
+ .macro scs_save tsk, tmp
+ str scs_sp, [\tsk, #TSK_TI_SCS_SP]
+ .endm
+ #else
+- .macro scs_load tsk, tmp
++ .macro scs_load_current
+ .endm
+
+ .macro scs_save tsk, tmp
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -221,7 +221,7 @@ alternative_else_nop_endif
+
+ ptrauth_keys_install_kernel tsk, x20, x22, x23
+
+- scs_load tsk, x20
++ scs_load_current
+ .else
+ add x21, sp, #S_FRAME_SIZE
+ get_current_task tsk
+@@ -1025,7 +1025,7 @@ SYM_FUNC_START(cpu_switch_to)
+ msr sp_el0, x1
+ ptrauth_keys_install_kernel x1, x8, x9, x10
+ scs_save x0, x8
+- scs_load x1, x8
++ scs_load_current
+ ret
+ SYM_FUNC_END(cpu_switch_to)
+ NOKPROBE(cpu_switch_to)
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -747,7 +747,7 @@ SYM_FUNC_START_LOCAL(__secondary_switche
+ ldr x2, [x0, #CPU_BOOT_TASK]
+ cbz x2, __secondary_too_slow
+ msr sp_el0, x2
+- scs_load x2, x3
++ scs_load_current
+ mov x29, #0
+ mov x30, #0
+
--- /dev/null
+From 59b37fe52f49955791a460752c37145f1afdcad1 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Mon, 9 Jan 2023 18:48:00 +0100
+Subject: arm64: Stash shadow stack pointer in the task struct on interrupt
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit 59b37fe52f49955791a460752c37145f1afdcad1 upstream.
+
+Instead of reloading the shadow call stack pointer from the ordinary
+stack, which may be vulnerable to the kind of gadget based attacks
+shadow call stacks were designed to prevent, let's store a task's shadow
+call stack pointer in the task struct when switching to the shadow IRQ
+stack.
+
+Given that currently, the task_struct::scs_sp field is only used to
+preserve the shadow call stack pointer while a task is scheduled out or
+running in user space, reusing this field to preserve and restore it
+while running off the IRQ stack must be safe, as those occurrences are
+guaranteed to never overlap. (The stack switching logic only switches
+stacks when running from the task stack, and so the value being saved
+here always corresponds to the task mode shadow stack.)
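+
+A minimal sketch of the resulting save/restore pairing, using the
+macros introduced by the previous patch (scs_save still declares an
+unused tmp argument; GNU as treats a missing macro argument as empty,
+so invoking it with a single argument assembles fine):
+
+	// IRQ entry, still on the task stack and task shadow stack:
+	scs_save tsk			// str scs_sp, [tsk, #TSK_TI_SCS_SP]
+	...				// repoint scs_sp at the shadow IRQ stack
+
+	// IRQ exit, after switching back to the task stack:
+	scs_load_current		// reload scs_sp from the task struct,
+					// not from a GPR or the ordinary stack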
+
+While at it, fold a mov/add/mov sequence into a single add.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/r/20230109174800.3286265-3-ardb@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ardb: v5.10 backport, which doesn't have call_on_irq_stack() yet]
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -431,9 +431,7 @@ SYM_CODE_END(__swpan_exit_el0)
+
+ .macro irq_stack_entry
+ mov x19, sp // preserve the original sp
+-#ifdef CONFIG_SHADOW_CALL_STACK
+- mov x24, scs_sp // preserve the original shadow stack
+-#endif
++ scs_save tsk // preserve the original shadow stack
+
+ /*
+ * Compare sp with the base of the task stack.
+@@ -467,9 +465,7 @@ SYM_CODE_END(__swpan_exit_el0)
+ */
+ .macro irq_stack_exit
+ mov sp, x19
+-#ifdef CONFIG_SHADOW_CALL_STACK
+- mov scs_sp, x24
+-#endif
++ scs_load_current
+ .endm
+
+ /* GPRs used by entry code */