From: Breno Leitao
Date: Mon, 7 Jul 2025 16:01:08 +0000 (-0700)
Subject: arm64: remove CONFIG_VMAP_STACK checks from entry code
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9d1869f0f537d26005a521a141dde759fc3303f5;p=thirdparty%2Flinux.git

arm64: remove CONFIG_VMAP_STACK checks from entry code

With VMAP_STACK now always enabled on arm64, remove all CONFIG_VMAP_STACK
conditionals from entry handling in arch/arm64/kernel/entry-common.c and
arch/arm64/kernel/entry.S.

This change unconditionally includes the bad stack handling and overflow
detection logic, simplifying the code and reflecting the mandatory use of
VMAP_STACK for all arm64 kernel builds.

Signed-off-by: Breno Leitao
Acked-by: Ard Biesheuvel
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20250707-arm64_vmap-v1-8-8de98ca0f91c@debian.org
Signed-off-by: Will Deacon
---

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 7c1970b341b8c..99a341ee71313 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -977,7 +977,6 @@ UNHANDLED(el0t, 32, fiq)
 UNHANDLED(el0t, 32, error)
 #endif /* CONFIG_COMPAT */
 
-#ifdef CONFIG_VMAP_STACK
 asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
@@ -986,7 +985,6 @@ asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
 	arm64_enter_nmi(regs);
 	panic_bad_stack(regs, esr, far);
 }
-#endif /* CONFIG_VMAP_STACK */
 
 #ifdef CONFIG_ARM_SDE_INTERFACE
 asmlinkage noinstr unsigned long
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5ae2a34b50bda..ea74cb7aac5b8 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -55,7 +55,6 @@
 	.endif
 
 	sub	sp, sp, #PT_REGS_SIZE
-#ifdef CONFIG_VMAP_STACK
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.
 	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
@@ -97,7 +96,6 @@
 	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
 	sub	sp, sp, x0
 	mrs	x0, tpidrro_el0
-#endif
 	b	el\el\ht\()_\regsize\()_\label
 .org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
 	.endm
@@ -540,7 +538,6 @@ SYM_CODE_START(vectors)
 	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
 SYM_CODE_END(vectors)
 
-#ifdef CONFIG_VMAP_STACK
 SYM_CODE_START_LOCAL(__bad_stack)
 	/*
 	 * We detected an overflow in kernel_ventry, which switched to the
@@ -568,7 +565,6 @@ SYM_CODE_START_LOCAL(__bad_stack)
 	bl	handle_bad_stack
 	ASM_BUG()
 SYM_CODE_END(__bad_stack)
-#endif /* CONFIG_VMAP_STACK */
 
 
 	.macro entry_handler el:req, ht:req, regsize:req, label:req
@@ -1003,7 +999,6 @@ SYM_CODE_START(__sdei_asm_handler)
 1:	adr_this_cpu	dst=x5, sym=sdei_active_critical_event, tmp=x6
 2:	str	x19, [x5]
 
-#ifdef CONFIG_VMAP_STACK
 	/*
 	 * entry.S may have been using sp as a scratch register, find whether
 	 * this is a normal or critical event and switch to the appropriate
@@ -1016,7 +1011,6 @@ SYM_CODE_START(__sdei_asm_handler)
 2:	mov	x6, #SDEI_STACK_SIZE
 	add	x5, x5, x6
 	mov	sp, x5
-#endif
 
 #ifdef CONFIG_SHADOW_CALL_STACK
 	/* Use a separate shadow call stack for normal and critical events */