x86/smp: Move cpu number to percpu hot section
author		Brian Gerst <brgerst@gmail.com>
		Mon, 3 Mar 2025 16:52:39 +0000 (11:52 -0500)
committer	Ingo Molnar <mingo@kernel.org>
		Tue, 4 Mar 2025 19:30:33 +0000 (20:30 +0100)
No functional change.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-5-brgerst@gmail.com
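
At the access level, the patch only renames the symbol behind the CPU-number read; a minimal before/after sketch built from the macros in the diff below (the generated-asm comment is an assumption about typical x86-64 percpu codegen, not taken from this commit):

	/* Before: cpu_number was a member of the pcpu_hot struct. */
	#define raw_smp_processor_id()	this_cpu_read(pcpu_hot.cpu_number)

	/* After: cpu_number is a standalone variable in the cache-hot
	 * percpu section, declared with the DECLARE_PER_CPU_CACHE_HOT
	 * helper.
	 */
	DECLARE_PER_CPU_CACHE_HOT(int, cpu_number);
	#define raw_smp_processor_id()	this_cpu_read(cpu_number)

	/* Either form should compile to a single %gs-relative load,
	 * e.g. "movl %gs:cpu_number(%rip), %eax" on x86-64, which is
	 * why this is not a functional change.
	 */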
arch/x86/include/asm/current.h
arch/x86/include/asm/smp.h
arch/x86/kernel/setup_percpu.c
kernel/bpf/verifier.c

diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 46a736d6f2eca3ce9496210c3646c59dc35807dc..f988462d8b69694aaaebc4bae1aa578410ab30f4 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -14,7 +14,6 @@ struct task_struct;
 
 struct pcpu_hot {
        struct task_struct      *current_task;
-       int                     cpu_number;
 #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
        u64                     call_depth;
 #endif
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 76d7c013cd0899602751988d403445589aadaf62..bcfa00232d79095a7b352893829522359755b428 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -6,7 +6,8 @@
 #include <linux/thread_info.h>
 
 #include <asm/cpumask.h>
-#include <asm/current.h>
+
+DECLARE_PER_CPU_CACHE_HOT(int, cpu_number);
 
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
@@ -132,8 +133,8 @@ __visible void smp_call_function_single_interrupt(struct pt_regs *r);
  * This function is needed by all SMP systems. It must _always_ be valid
  * from the initial startup.
  */
-#define raw_smp_processor_id()  this_cpu_read(pcpu_hot.cpu_number)
-#define __smp_processor_id() __this_cpu_read(pcpu_hot.cpu_number)
+#define raw_smp_processor_id()  this_cpu_read(cpu_number)
+#define __smp_processor_id() __this_cpu_read(cpu_number)
 
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 {
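
Callers of the two macros are unaffected by the rename; a hypothetical user (not part of this patch), for illustration:

	/* Hypothetical caller: raw_smp_processor_id() still returns the
	 * running CPU's number without requiring preemption to be disabled.
	 */
	static void example_report_cpu(void)
	{
		pr_info("running on CPU %d\n", raw_smp_processor_id());
	}
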
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 1e7be9409aa26d176e215cbc89a6ce7d9ebab185..175afc3ffb128412818a0ac56a0ed107b6bd62d0 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -23,6 +23,9 @@
 #include <asm/cpumask.h>
 #include <asm/cpu.h>
 
+DEFINE_PER_CPU_CACHE_HOT(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
@@ -161,7 +164,7 @@ void __init setup_per_cpu_areas(void)
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
-               per_cpu(pcpu_hot.cpu_number, cpu) = cpu;
+               per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                /*
                 * Copy data used in early init routines from the
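
The per_cpu() write above is what seeds each CPU's copy during boot: it relocates the variable's link-time address into CPU cpu's percpu area using that CPU's offset, the same per_cpu_offset() value assigned two lines earlier. A sketch of the equivalent arithmetic (the helper name is hypothetical; the real per_cpu() macro goes through per_cpu_ptr()):

	/* Hypothetical helper showing roughly what
	 * per_cpu(cpu_number, cpu) = cpu expands to.
	 */
	static void seed_cpu_number(int cpu)
	{
		int *p = (int *)((unsigned long)&cpu_number + per_cpu_offset(cpu));

		*p = cpu;	/* each CPU's copy holds its own number */
	}
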
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f4859516b190f0d532dd8845f4ea5533c28bd6b5..6e604caa870cbc0a059d648cf6ec9e0c9c01a879 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21702,12 +21702,12 @@ patch_map_ops_generic:
                if (insn->imm == BPF_FUNC_get_smp_processor_id &&
                    verifier_inlines_helper_call(env, insn->imm)) {
                        /* BPF_FUNC_get_smp_processor_id inlining is an
-                        * optimization, so if pcpu_hot.cpu_number is ever
+                        * optimization, so if cpu_number is ever
                         * changed in some incompatible and hard to support
                         * way, it's fine to back out this inlining logic
                         */
 #ifdef CONFIG_SMP
-                       insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
+                       insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&cpu_number);
                        insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
                        insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
                        cnt = 3;
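
For context, the three rewritten instructions compute the same thing as the native macro: load the variable's percpu offset as an immediate, turn it into an address in the running CPU's percpu area, then do a 32-bit load. A C-level sketch (this_cpu_base() is a hypothetical stand-in for what BPF_MOV64_PERCPU_REG does):

	/* C-level sketch of the inlined sequence; this_cpu_base() is a
	 * hypothetical stand-in for BPF_MOV64_PERCPU_REG, which adds the
	 * running CPU's percpu base address to the offset in the register.
	 */
	static u32 inlined_get_smp_processor_id(void)
	{
		unsigned long r0 = (u32)(unsigned long)&cpu_number;	/* BPF_MOV64_IMM */

		r0 = this_cpu_base(r0);		/* BPF_MOV64_PERCPU_REG */
		return *(u32 *)r0;		/* BPF_LDX_MEM(BPF_W, ...) */
	}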