x86/percpu: Move pcpu_hot to percpu hot section
author    Brian Gerst <brgerst@gmail.com>
          Mon, 3 Mar 2025 16:52:37 +0000 (11:52 -0500)
committer Ingo Molnar <mingo@kernel.org>
          Tue, 4 Mar 2025 19:30:33 +0000 (20:30 +0100)
Also change the alignment of the percpu hot section:

 -       PERCPU_SECTION(INTERNODE_CACHE_BYTES)
 +       PERCPU_SECTION(L1_CACHE_BYTES)

vSMP overrides INTERNODE_CACHE_BYTES (CONFIG_X86_VSMP raises it to 4096
bytes), which would invalidate the too-large-section assert we do:

  ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot section too large")
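
The assert relies on the __per_cpu_hot_start/__per_cpu_hot_end marker
symbols emitted around the hot per-CPU input section. As a rough sketch
(an assumption, modeled on the PERCPU_INPUT() helpers in
include/asm-generic/vmlinux.lds.h, not the literal definition), the
generic linker script brackets that section along these lines:

  /* Collect all hot per-CPU variables into one aligned group: */
  . = ALIGN(cacheline);
  __per_cpu_hot_start = .;
  *(SORT_BY_ALIGNMENT(.data..percpu..hot.*))
  __per_cpu_hot_end = .;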

[ mingo: Added INTERNODE_CACHE_BYTES fix & explanation. ]

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-3-brgerst@gmail.com
arch/x86/include/asm/current.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/vmlinux.lds.S

diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index bf5953883ec365377fec5979f6d5c34418ebba32..60bc66edca833d271a07af369c2d519fad26e5cf 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
 struct task_struct;
 
 struct pcpu_hot {
-       union {
-               struct {
-                       struct task_struct      *current_task;
-                       int                     preempt_count;
-                       int                     cpu_number;
+       struct task_struct      *current_task;
+       int                     preempt_count;
+       int                     cpu_number;
 #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
-                       u64                     call_depth;
+       u64                     call_depth;
 #endif
-                       unsigned long           top_of_stack;
-                       void                    *hardirq_stack_ptr;
-                       u16                     softirq_pending;
+       unsigned long           top_of_stack;
+       void                    *hardirq_stack_ptr;
+       u16                     softirq_pending;
 #ifdef CONFIG_X86_64
-                       bool                    hardirq_stack_inuse;
+       bool                    hardirq_stack_inuse;
 #else
-                       void                    *softirq_stack_ptr;
+       void                    *softirq_stack_ptr;
 #endif
-               };
-               u8      pad[64];
-       };
 };
-static_assert(sizeof(struct pcpu_hot) == 64);
 
-DECLARE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot);
+DECLARE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot);
 
 /* const-qualified alias to pcpu_hot, aliased by linker. */
-DECLARE_PER_CPU_ALIGNED(const struct pcpu_hot __percpu_seg_override,
+DECLARE_PER_CPU_CACHE_HOT(const struct pcpu_hot __percpu_seg_override,
                        const_pcpu_hot);
 
 static __always_inline struct task_struct *get_current(void)
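
For context, a minimal sketch (an assumption, not part of this diff) of
how the *_PER_CPU_CACHE_HOT() variants can map onto the generic per-CPU
section helpers, using a dedicated "..hot" section name so the linker can
group every hot per-CPU variable together:

	/* Hypothetical expansion, modeled on <linux/percpu-defs.h>: */
	#define DECLARE_PER_CPU_CACHE_HOT(type, name)			\
		DECLARE_PER_CPU_SECTION(type, name, "..hot.." #name)

	#define DEFINE_PER_CPU_CACHE_HOT(type, name)			\
		DEFINE_PER_CPU_SECTION(type, name, "..hot.." #name)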
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 88a6707b765cfe6b50c87efb99d1b9063601d520..f00870b2c98075e9f871b57143c59ad9e9a1c846 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2064,7 +2064,7 @@ static __init int setup_setcpuid(char *arg)
 }
 __setup("setcpuid=", setup_setcpuid);
 
-DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
+DEFINE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot) = {
        .current_task   = &init_task,
        .preempt_count  = INIT_PREEMPT_COUNT,
        .top_of_stack   = TOP_OF_INIT_STACK,
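
The section change is transparent to call sites, which keep using the
regular per-CPU accessors; an illustrative example (not from this diff):

	/* Illustrative: raw_smp_processor_id() boils down to a single
	 * read from the hot per-CPU cacheline. */
	int cpu = this_cpu_read(pcpu_hot.cpu_number);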
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1769a7126224c829cf7e48f0e5b5c539e5ea029e..0ef9870ea52ec7a007949c2650faa57a50173e48 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -187,6 +187,8 @@ SECTIONS
 
                PAGE_ALIGNED_DATA(PAGE_SIZE)
 
+               CACHE_HOT_DATA(L1_CACHE_BYTES)
+
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 
                DATA_DATA
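
CACHE_HOT_DATA() appears to gather the corresponding hot data of the
kernel image itself; a hypothetical sketch of such a helper (the real
definition lives in include/asm-generic/vmlinux.lds.h) might look like:

	/* Group .data..hot.* input sections at the given alignment: */
	#define CACHE_HOT_DATA(align)					\
		. = ALIGN(align);					\
		*(SORT_BY_ALIGNMENT(.data..hot.*))			\
		. = ALIGN(align);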
@@ -327,7 +329,8 @@ SECTIONS
                EXIT_DATA
        }
 
-       PERCPU_SECTION(INTERNODE_CACHE_BYTES)
+       PERCPU_SECTION(L1_CACHE_BYTES)
+       ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot section too large")
 
        RUNTIME_CONST_VARIABLES
        RUNTIME_CONST(ptr, USER_PTR_MAX)