git.ipfire.org Git - thirdparty/linux.git/commitdiff
x86/percpu: Move top_of_stack to percpu hot section
author: Brian Gerst <brgerst@gmail.com>
Mon, 3 Mar 2025 16:52:43 +0000 (11:52 -0500)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 4 Mar 2025 19:30:33 +0000 (20:30 +0100)
No functional change.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-9-brgerst@gmail.com
12 files changed:
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/current.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/processor.h
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/vmlinux.lds.S

index 20be5758c2d2e235484e32329068233eb8a72d85..92c0b4a94e0ad48b9b9b9081e61521aaf92df128 100644 (file)
@@ -1153,7 +1153,7 @@ SYM_CODE_START(asm_exc_nmi)
         * is using the thread stack right now, so it's safe for us to use it.
         */
        movl    %esp, %ebx
-       movl    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esp
+       movl    PER_CPU_VAR(cpu_current_top_of_stack), %esp
        call    exc_nmi
        movl    %ebx, %esp
 
@@ -1217,7 +1217,7 @@ SYM_CODE_START(rewind_stack_and_make_dead)
        /* Prevent any naive code from trying to unwind to our caller. */
        xorl    %ebp, %ebp
 
-       movl    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esi
+       movl    PER_CPU_VAR(cpu_current_top_of_stack), %esi
        leal    -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
 
        call    make_task_dead
index 49d3b222fe99dc945a102fe0fdd870101a5e0be2..f40bdf97d390a7396315d33c720d2d52d65b0238 100644 (file)
@@ -92,7 +92,7 @@ SYM_CODE_START(entry_SYSCALL_64)
        /* tss.sp2 is scratch space. */
        movq    %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
-       movq    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
 SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
@@ -1168,7 +1168,7 @@ SYM_CODE_START(asm_exc_nmi)
        FENCE_SWAPGS_USER_ENTRY
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
        movq    %rsp, %rdx
-       movq    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
        UNWIND_HINT_IRET_REGS base=%rdx offset=8
        pushq   5*8(%rdx)       /* pt_regs->ss */
        pushq   4*8(%rdx)       /* pt_regs->rsp */
@@ -1486,7 +1486,7 @@ SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
        /* Prevent any naive code from trying to unwind to our caller. */
        xorl    %ebp, %ebp
 
-       movq    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rax
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rax
        leaq    -PTREGS_SIZE(%rax), %rsp
        UNWIND_HINT_REGS
 
index ed0a5f2dc1297723a3e815964e1aab37fb7f7b6f..a45e1125fc6cf85cc744f568c778d9e0bb26de18 100644 (file)
@@ -57,7 +57,7 @@ SYM_CODE_START(entry_SYSENTER_compat)
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
        popq    %rax
 
-       movq    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        /* Construct struct pt_regs on stack */
        pushq   $__USER_DS              /* pt_regs->ss */
@@ -193,7 +193,7 @@ SYM_CODE_START(entry_SYSCALL_compat)
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
 
        /* Switch to the kernel stack */
-       movq    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
 SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
index 6fad5a4c21d7c0181ce6961cfefd044d9cf28f82..3d1b123c2ee32b9688db0f8ddccdf0d1731e34ca 100644 (file)
@@ -14,7 +14,6 @@ struct task_struct;
 
 struct pcpu_hot {
        struct task_struct      *current_task;
-       unsigned long           top_of_stack;
 };
 
 DECLARE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot);
index 8a8cf86dded32bcd94c2293487965fed71ff7f87..41517a7f7f1c59f0f543b2df0d9f50bb542e935b 100644 (file)
@@ -551,7 +551,7 @@ do {                                                                        \
  * it is accessed while this_cpu_read_stable() allows the value to be cached.
  * this_cpu_read_stable() is more efficient and can be used if its value
  * is guaranteed to be valid across CPUs.  The current users include
- * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
+ * pcpu_hot.current_task and cpu_current_top_of_stack, both of which are
  * actually per-thread variables implemented as per-CPU variables and
  * thus stable for the duration of the respective task.
  */
index 6bb6af0b543040bcc74243a80519065497365d72..7a3918308a3671149ce147142936d1bbcd4dc645 100644 (file)
@@ -422,6 +422,11 @@ DECLARE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
 DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
 #endif
 
+DECLARE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack);
+/* const-qualified alias provided by the linker. */
+DECLARE_PER_CPU_CACHE_HOT(const unsigned long __percpu_seg_override,
+                         const_cpu_current_top_of_stack);
+
 #ifdef CONFIG_X86_64
 static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 {
@@ -547,9 +552,9 @@ static __always_inline unsigned long current_top_of_stack(void)
         *  entry trampoline.
         */
        if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
-               return this_cpu_read_const(const_pcpu_hot.top_of_stack);
+               return this_cpu_read_const(const_cpu_current_top_of_stack);
 
-       return this_cpu_read_stable(pcpu_hot.top_of_stack);
+       return this_cpu_read_stable(cpu_current_top_of_stack);
 }
 
 static __always_inline bool on_thread_stack(void)
index 6fae88f8ae1e7c027afdc9400c1e78c2848207f0..54ace808defd221dcd411d50c39bf6ab715f2ff3 100644 (file)
@@ -107,7 +107,6 @@ static void __used common(void)
        OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
        OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
        OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
-       OFFSET(X86_top_of_stack, pcpu_hot, top_of_stack);
        OFFSET(X86_current_task, pcpu_hot, current_task);
 #if IS_ENABLED(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64)
        /* Offset for fields in aria_ctx */
index fd224ae57d62e5cb163a0cb2050f25b7b79ef0f8..51653e01a7161fe64327424fecef46d6078de128 100644 (file)
@@ -2066,7 +2066,6 @@ __setup("setcpuid=", setup_setcpuid);
 
 DEFINE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot) = {
        .current_task   = &init_task,
-       .top_of_stack   = TOP_OF_INIT_STACK,
 };
 EXPORT_PER_CPU_SYMBOL(pcpu_hot);
 EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
@@ -2074,6 +2073,8 @@ EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
 DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
+DEFINE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
+
 #ifdef CONFIG_X86_64
 /*
  * Note: Do not make this dependant on CONFIG_MITIGATION_CALL_DEPTH_TRACKING
index 2bdab416298cef5d9ef11c8e957c46dfc49ee2d2..8ec44acb863be9cb8a165789df13833ab618f12a 100644 (file)
@@ -190,13 +190,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        arch_end_context_switch(next_p);
 
        /*
-        * Reload esp0 and pcpu_hot.top_of_stack.  This changes
+        * Reload esp0 and cpu_current_top_of_stack.  This changes
         * current_thread_info().  Refresh the SYSENTER configuration in
         * case prev or next is vm86.
         */
        update_task_stack(next_p);
        refresh_sysenter_cs(next);
-       this_cpu_write(pcpu_hot.top_of_stack,
+       this_cpu_write(cpu_current_top_of_stack,
                       (unsigned long)task_stack_page(next_p) +
                       THREAD_SIZE);
 
index 2f38416deb746ff7217518a1653cb9ff9a77fc06..d8f4bcef8ee4a24411b22c01470a99438f448f52 100644 (file)
@@ -669,7 +669,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Switch the PDA and FPU contexts.
         */
        raw_cpu_write(pcpu_hot.current_task, next_p);
-       raw_cpu_write(pcpu_hot.top_of_stack, task_top_of_stack(next_p));
+       raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
        switch_fpu_finish(next_p);
 
index c5aabddf557341a0a435edb2aaa6684a08d35085..c3a26e60e3c40780a5aa65facb215888557bea00 100644 (file)
@@ -832,7 +832,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
 
 #ifdef CONFIG_X86_32
        /* Stack for startup_32 can be just as for start_secondary onwards */
-       per_cpu(pcpu_hot.top_of_stack, cpu) = task_top_of_stack(idle);
+       per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
 #endif
        return 0;
 }
index 0ef9870ea52ec7a007949c2650faa57a50173e48..475f6717f27ad36dad42f710147dc42cf5f3aea4 100644 (file)
@@ -44,6 +44,7 @@ ENTRY(phys_startup_64)
 
 jiffies = jiffies_64;
 const_pcpu_hot = pcpu_hot;
+const_cpu_current_top_of_stack = cpu_current_top_of_stack;
 
 #if defined(CONFIG_X86_64)
 /*