git.ipfire.org Git - thirdparty/linux.git/commitdiff
arm64: remove CONFIG_VMAP_STACK checks from entry code
author: Breno Leitao <leitao@debian.org>
Mon, 7 Jul 2025 16:01:08 +0000 (09:01 -0700)
committer: Will Deacon <will@kernel.org>
Tue, 8 Jul 2025 12:41:09 +0000 (13:41 +0100)
With VMAP_STACK now always enabled on arm64, remove all CONFIG_VMAP_STACK
conditionals from entry handling in arch/arm64/kernel/entry-common.c and
arch/arm64/kernel/entry.S.

This change unconditionally includes the bad stack handling and overflow
detection logic, simplifying the code and reflecting the mandatory use of
VMAP_STACK for all arm64 kernel builds.

Signed-off-by: Breno Leitao <leitao@debian.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20250707-arm64_vmap-v1-8-8de98ca0f91c@debian.org
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry.S

index 7c1970b341b8cbdef1b8f6fcd3b9143ec396bf3e..99a341ee713137b3f8e4bcf114508cada2857988 100644 (file)
@@ -977,7 +977,6 @@ UNHANDLED(el0t, 32, fiq)
 UNHANDLED(el0t, 32, error)
 #endif /* CONFIG_COMPAT */
 
-#ifdef CONFIG_VMAP_STACK
 asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
@@ -986,7 +985,6 @@ asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
        arm64_enter_nmi(regs);
        panic_bad_stack(regs, esr, far);
 }
-#endif /* CONFIG_VMAP_STACK */
 
 #ifdef CONFIG_ARM_SDE_INTERFACE
 asmlinkage noinstr unsigned long
index 5ae2a34b50bda55a6972a412e284faa482388f95..ea74cb7aac5b89210839ff7ca85273429544a6be 100644 (file)
@@ -55,7 +55,6 @@
        .endif
 
        sub     sp, sp, #PT_REGS_SIZE
-#ifdef CONFIG_VMAP_STACK
        /*
         * Test whether the SP has overflowed, without corrupting a GPR.
         * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
@@ -97,7 +96,6 @@
        /* We were already on the overflow stack. Restore sp/x0 and carry on. */
        sub     sp, sp, x0
        mrs     x0, tpidrro_el0
-#endif
        b       el\el\ht\()_\regsize\()_\label
 .org .Lventry_start\@ + 128    // Did we overflow the ventry slot?
        .endm
@@ -540,7 +538,6 @@ SYM_CODE_START(vectors)
        kernel_ventry   0, t, 32, error         // Error 32-bit EL0
 SYM_CODE_END(vectors)
 
-#ifdef CONFIG_VMAP_STACK
 SYM_CODE_START_LOCAL(__bad_stack)
        /*
         * We detected an overflow in kernel_ventry, which switched to the
@@ -568,7 +565,6 @@ SYM_CODE_START_LOCAL(__bad_stack)
        bl      handle_bad_stack
        ASM_BUG()
 SYM_CODE_END(__bad_stack)
-#endif /* CONFIG_VMAP_STACK */
 
 
        .macro entry_handler el:req, ht:req, regsize:req, label:req
@@ -1003,7 +999,6 @@ SYM_CODE_START(__sdei_asm_handler)
 1:     adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
 2:     str     x19, [x5]
 
-#ifdef CONFIG_VMAP_STACK
        /*
         * entry.S may have been using sp as a scratch register, find whether
         * this is a normal or critical event and switch to the appropriate
@@ -1016,7 +1011,6 @@ SYM_CODE_START(__sdei_asm_handler)
 2:     mov     x6, #SDEI_STACK_SIZE
        add     x5, x5, x6
        mov     sp, x5
-#endif
 
 #ifdef CONFIG_SHADOW_CALL_STACK
        /* Use a separate shadow call stack for normal and critical events */