arm64: Remove CONFIG_VMAP_STACK conditionals from THREAD_SHIFT and THREAD_ALIGN
author    Breno Leitao <leitao@debian.org>
          Mon, 7 Jul 2025 16:01:03 +0000 (09:01 -0700)
committer Will Deacon <will@kernel.org>
          Tue, 8 Jul 2025 12:41:08 +0000 (13:41 +0100)
Now that VMAP_STACK is always enabled on arm64, remove the
CONFIG_VMAP_STACK conditional logic from the definitions of THREAD_SHIFT
and THREAD_ALIGN in arch/arm64/include/asm/memory.h. This simplifies the
code by unconditionally setting THREAD_ALIGN to (2 * THREAD_SIZE) and
adjusting the THREAD_SHIFT definition to only depend on MIN_THREAD_SHIFT
and PAGE_SHIFT.

This change reflects the updated arm64 stack model, where all kernel
threads use virtually mapped stacks with guard pages, and ensures
alignment and stack sizing are consistently handled.

Signed-off-by: Breno Leitao <leitao@debian.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20250707-arm64_vmap-v1-3-8de98ca0f91c@debian.org
Signed-off-by: Will Deacon <will@kernel.org>
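
With this patch applied, the THREAD_SHIFT and THREAD_ALIGN logic in
arch/arm64/include/asm/memory.h reduces to roughly the following. This is a
minimal sketch pieced together from the hunks below; MIN_THREAD_SHIFT,
THREAD_SIZE and the closing #endif are pre-existing context in the header,
not introduced by this change:

    /* Page-granular stacks when MIN_THREAD_SHIFT is smaller than a page. */
    #if (MIN_THREAD_SHIFT < PAGE_SHIFT)
    #define THREAD_SHIFT           PAGE_SHIFT
    #else
    #define THREAD_SHIFT           MIN_THREAD_SHIFT
    #endif

    /* All stacks are VMAP'd now, so the doubled alignment is unconditional. */
    #define THREAD_ALIGN           (2 * THREAD_SIZE)
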
arch/arm64/include/asm/memory.h

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 717829df294eafd286df3b1e2e37e1dc9a814dab..5213248e081b0a1e26fba62bd52406c914d83817 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
  * VMAP'd stacks are allocated at page granularity, so we must ensure that such
  * stacks are a multiple of page size.
  */
-#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#if (MIN_THREAD_SHIFT < PAGE_SHIFT)
 #define THREAD_SHIFT           PAGE_SHIFT
 #else
 #define THREAD_SHIFT           MIN_THREAD_SHIFT
  * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
  * assembly.
  */
-#ifdef CONFIG_VMAP_STACK
 #define THREAD_ALIGN           (2 * THREAD_SIZE)
-#else
-#define THREAD_ALIGN           THREAD_SIZE
-#endif
 
 #define IRQ_STACK_SIZE         THREAD_SIZE
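
The (2 * THREAD_SIZE) alignment is what makes the check in the retained
comment cheap: a THREAD_SIZE-sized stack placed at a (2 * THREAD_SIZE)-aligned
base never has bit THREAD_SHIFT set in a valid stack pointer, while a pointer
that has underflowed into the guard region just below the stack does, so a
single bit test suffices. A standalone illustration of that arithmetic (not
kernel code; THREAD_SHIFT of 14, i.e. 16K stacks, is just an assumed example
value):

    #include <assert.h>
    #include <stdint.h>

    #define THREAD_SHIFT    14                      /* assumed example value */
    #define THREAD_SIZE     (1UL << THREAD_SHIFT)
    #define THREAD_ALIGN    (2 * THREAD_SIZE)

    /* Mirrors the "sp & (1 << THREAD_SHIFT)" test from the comment above. */
    static int sp_overflowed(uint64_t sp)
    {
            return (sp & (1UL << THREAD_SHIFT)) != 0;
    }

    int main(void)
    {
            /* Hypothetical stack base, aligned to THREAD_ALIGN. */
            uint64_t base = 16UL * THREAD_ALIGN;

            assert(!sp_overflowed(base));                    /* lowest valid sp     */
            assert(!sp_overflowed(base + THREAD_SIZE - 16)); /* near the stack top  */
            assert(sp_overflowed(base - 16));                /* into the guard page */
            return 0;
    }

Because the test only inspects one bit of sp, it matches the "which we can do
cheaply in the entry assembly" remark in the comment kept by this patch.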