--- /dev/null
+From xiangyang3@huawei.com Tue Feb 20 16:30:54 2024
+From: Xiang Yang <xiangyang3@huawei.com>
+Date: Mon, 19 Feb 2024 21:21:53 +0800
+Subject: Revert "arm64: Stash shadow stack pointer in the task struct on interrupt"
+To: <ardb@kernel.org>, <mark.rutland@arm.com>, <catalin.marinas@arm.com>, <will@kernel.org>
+Cc: <keescook@chromium.org>, <linux-arm-kernel@lists.infradead.org>, <stable@vger.kernel.org>, <gregkh@linuxfoundation.org>, <xiangyang3@huawei.com>, <xiujianfeng@huawei.com>, <liaochang1@huawei.com>
+Message-ID: <20240219132153.378265-1-xiangyang3@huawei.com>
+
+
+This reverts commit 3f225f29c69c13ce1cbdb1d607a42efeef080056 which is
+commit 59b37fe52f49955791a460752c37145f1afdcad1 upstream.
+
+The shadow call stack pointer for an irq is now stored in the current
+task's thread_info by irq_stack_entry. If soft irqs are pending at the
+end of a hard irq and the softirq is then processed with interrupts
+enabled, irq_stack_entry is entered again and overwrites the shadow
+call stack pointer already stored in the current task's thread_info,
+leading to an incorrect shadow call stack restoration for the first
+entry of the hard IRQ, after which the system ends up panicking.
+
+task A | task A
+-------------------------------------+------------------------------------
+el1_irq //irq1 enter |
+ irq_handler //save scs_sp1 |
+ gic_handle_irq |
+ irq_exit |
+ __do_softirq |
+ | el1_irq //irq2 enter
+ | irq_handler //save scs_sp2
+ | //overwrite scs_sp1
+ | ...
+ | irq_stack_exit //restore scs_sp2
+ irq_stack_exit //restore wrong |
+ //scs_sp2 |
+
+So revert this commit to fix it.
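+
+For illustration only, not part of the original commit message: a
+minimal user-space C sketch of the failure mode, with hypothetical
+names. A single per-task save slot models the thread_info field; the
+nested entry clobbers it, so the outer entry restores the inner value:
+
+	/* Hedged model of the bug: one shared per-task save slot. */
+	#include <stdio.h>
+
+	static unsigned long task_scs_slot;	/* models the thread_info save */
+
+	static void irq_entry(unsigned long scs_sp, int nested)
+	{
+		task_scs_slot = scs_sp;		/* nested entry overwrites outer save */
+		if (!nested)
+			irq_entry(0x2000, 1);	/* softirq window: irq2 re-enters */
+		/* irq_stack_exit: every entry restores from the same slot */
+		printf("entry %d restores %#lx\n", nested, task_scs_slot);
+	}
+
+	int main(void)
+	{
+		irq_entry(0x1000, 0);	/* saved 0x1000, wrongly restores 0x2000 */
+		return 0;
+	}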
+
+Fixes: 3f225f29c69c ("arm64: Stash shadow stack pointer in the task struct on interrupt")
+Signed-off-by: Xiang Yang <xiangyang3@huawei.com>
+Acked-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -431,7 +431,9 @@ SYM_CODE_END(__swpan_exit_el0)
+
+ .macro irq_stack_entry
+ mov x19, sp // preserve the original sp
+- scs_save tsk // preserve the original shadow stack
++#ifdef CONFIG_SHADOW_CALL_STACK
++ mov x24, scs_sp // preserve the original shadow stack
++#endif
+
+ /*
+ * Compare sp with the base of the task stack.
+@@ -465,7 +467,9 @@ SYM_CODE_END(__swpan_exit_el0)
+ */
+ .macro irq_stack_exit
+ mov sp, x19
+- scs_load_current
++#ifdef CONFIG_SHADOW_CALL_STACK
++ mov scs_sp, x24
++#endif
+ .endm
+
+ /* GPRs used by entry code */
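+
+For illustration only, not part of the patch: the restored code keeps
+the saved shadow call stack pointer in the callee-saved register x24
+(just as x19 preserves the original sp), so each exception frame keeps
+its own copy and nesting is safe. A hedged C analogue of that scheme,
+with hypothetical names, uses a local variable as the per-invocation
+save slot:
+
+	/* Hedged model of the reverted scheme: per-invocation save. */
+	#include <stdio.h>
+
+	static void irq_entry(unsigned long scs_sp, int nested)
+	{
+		unsigned long saved = scs_sp;	/* private to this entry, like x24 */
+		if (!nested)
+			irq_entry(0x2000, 1);	/* nested entry gets its own slot */
+		printf("entry %d restores %#lx\n", nested, saved);
+	}
+
+	int main(void)
+	{
+		irq_entry(0x1000, 0);	/* outer entry correctly restores 0x1000 */
+		return 0;
+	}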