From 0909c719c17b7a3e88dd1ee231b4a136c946c39e Mon Sep 17 00:00:00 2001
From: Breno Leitao <leitao@debian.org>
Date: Mon, 7 Jul 2025 09:01:03 -0700
Subject: [PATCH] arm64: Remove CONFIG_VMAP_STACK conditionals from
 THREAD_SHIFT and THREAD_ALIGN

Now that VMAP_STACK is always enabled on arm64, remove the
CONFIG_VMAP_STACK conditional logic from the definitions of
THREAD_SHIFT and THREAD_ALIGN in arch/arm64/include/asm/memory.h.

This simplifies the code by unconditionally setting THREAD_ALIGN to
(2 * THREAD_SIZE) and adjusting the THREAD_SHIFT definition to depend
only on MIN_THREAD_SHIFT and PAGE_SHIFT.

This change reflects the updated arm64 stack model, where all kernel
threads use virtually mapped stacks with guard pages, and ensures that
alignment and stack sizing are handled consistently.

Signed-off-by: Breno Leitao <leitao@debian.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20250707-arm64_vmap-v1-3-8de98ca0f91c@debian.org
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/memory.h | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 717829df294ea..5213248e081b0 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -118,7 +118,7 @@
  * VMAP'd stacks are allocated at page granularity, so we must ensure that such
  * stacks are a multiple of page size.
  */
-#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#if (MIN_THREAD_SHIFT < PAGE_SHIFT)
 #define THREAD_SHIFT		PAGE_SHIFT
 #else
 #define THREAD_SHIFT		MIN_THREAD_SHIFT
@@ -135,11 +135,7 @@
  * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
  * assembly.
  */
-#ifdef CONFIG_VMAP_STACK
 #define THREAD_ALIGN		(2 * THREAD_SIZE)
-#else
-#define THREAD_ALIGN		THREAD_SIZE
-#endif
 
 #define IRQ_STACK_SIZE		THREAD_SIZE
 
-- 
2.47.2
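
As context for the 2 * THREAD_SIZE alignment kept by this patch, below is
a minimal userspace C sketch, not the kernel's actual entry assembly: the
THREAD_SHIFT value, the stand-in base address, and the sp_overflowed()
helper are illustrative assumptions. It shows why aligning the stack to
twice its size makes an overflow detectable from a single bit of sp.

/*
 * Minimal userspace sketch, not kernel code: demonstrates the cheap
 * overflow check that 2 * THREAD_SIZE alignment enables. The shift
 * value and helper name are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SHIFT	14			/* e.g. 16K stacks */
#define THREAD_SIZE	(1UL << THREAD_SHIFT)
#define THREAD_ALIGN	(2 * THREAD_SIZE)

/*
 * With the stack base aligned to 2 * THREAD_SIZE, every address in
 * [base, base + THREAD_SIZE) has bit THREAD_SHIFT clear, while any
 * address in the THREAD_SIZE-sized region just below the base has it
 * set, so one bit test on sp distinguishes in-stack from overflowed.
 */
static int sp_overflowed(uintptr_t sp)
{
	return (sp & (1UL << THREAD_SHIFT)) != 0;
}

int main(void)
{
	/* Stand-in for a vmap'd stack base aligned to THREAD_ALIGN. */
	uintptr_t base = 4 * THREAD_ALIGN;

	printf("in stack:   %d\n", sp_overflowed(base + THREAD_SIZE / 2));	/* 0 */
	printf("overflowed: %d\n", sp_overflowed(base - 64));			/* 1 */
	return 0;
}

This single-bit property is what lets the entry path perform the check
cheaply, as the comment in memory.h notes, rather than comparing sp
against explicit stack bounds.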