git.ipfire.org Git - thirdparty/linux.git/commitdiff
smpboot: introduce SDTL_INIT() helper to tidy sched topology setup
author: Li Chen <chenl311@chinatelecom.cn>
Thu, 10 Jul 2025 10:57:07 +0000 (18:57 +0800)
committer: Peter Zijlstra <peterz@infradead.org>
Mon, 14 Jul 2025 08:59:34 +0000 (10:59 +0200)
Define a small SDTL_INIT(maskfn, flagsfn, name) macro and use it to build the
sched_domain_topology_level array. Purely a cleanup; behaviour is unchanged.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Li Chen <chenl311@chinatelecom.cn>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lore.kernel.org/r/20250710105715.66594-2-me@linux.beauty
arch/powerpc/kernel/smp.c
arch/s390/kernel/topology.c
arch/x86/kernel/smpboot.c
include/linux/sched/topology.h
kernel/sched/topology.c

index 5ac7084eebc0b8c5ab16d96c89cadde953003431..f59e4b9cc2074370987e7764f5e83af6ee14b9a0 100644 (file)
@@ -1700,28 +1700,23 @@ static void __init build_sched_topology(void)
 #ifdef CONFIG_SCHED_SMT
        if (has_big_cores) {
                pr_info("Big cores detected but using small core scheduling\n");
-               powerpc_topology[i++] = (struct sched_domain_topology_level){
-                       smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
-               };
+               powerpc_topology[i++] =
+                       SDTL_INIT(smallcore_smt_mask, powerpc_smt_flags, SMT);
        } else {
-               powerpc_topology[i++] = (struct sched_domain_topology_level){
-                       cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
-               };
+               powerpc_topology[i++] = SDTL_INIT(cpu_smt_mask, powerpc_smt_flags, SMT);
        }
 #endif
        if (shared_caches) {
-               powerpc_topology[i++] = (struct sched_domain_topology_level){
-                       shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE)
-               };
+               powerpc_topology[i++] =
+                       SDTL_INIT(shared_cache_mask, powerpc_shared_cache_flags, CACHE);
        }
+
        if (has_coregroup_support()) {
-               powerpc_topology[i++] = (struct sched_domain_topology_level){
-                       cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC)
-               };
+               powerpc_topology[i++] =
+                       SDTL_INIT(cpu_mc_mask, powerpc_shared_proc_flags, MC);
        }
-       powerpc_topology[i++] = (struct sched_domain_topology_level){
-               cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG)
-       };
+
+       powerpc_topology[i++] = SDTL_INIT(cpu_cpu_mask, powerpc_shared_proc_flags, PKG);
 
        /* There must be one trailing NULL entry left.  */
        BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
index 3df048e190b11436b215d9e376f0b011e7ff315c..46569b8e47dde3ddece57feeb3ad7ec11f73b8bb 100644 (file)
@@ -531,11 +531,11 @@ static const struct cpumask *cpu_drawer_mask(int cpu)
 }
 
 static struct sched_domain_topology_level s390_topology[] = {
-       { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
-       { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-       { cpu_book_mask, SD_INIT_NAME(BOOK) },
-       { cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
-       { cpu_cpu_mask, SD_INIT_NAME(PKG) },
+       SDTL_INIT(cpu_thread_mask, cpu_smt_flags, SMT),
+       SDTL_INIT(cpu_coregroup_mask, cpu_core_flags, MC),
+       SDTL_INIT(cpu_book_mask, NULL, BOOK),
+       SDTL_INIT(cpu_drawer_mask, NULL, DRAWER),
+       SDTL_INIT(cpu_cpu_mask, NULL, PKG),
        { NULL, },
 };
 
index fc78c2325fd2986a40ab46d7b59b237b0adc40a8..e0adf75f617a990c2931919d88a020e247a199e4 100644 (file)
@@ -485,35 +485,26 @@ static void __init build_sched_topology(void)
        int i = 0;
 
 #ifdef CONFIG_SCHED_SMT
-       x86_topology[i++] = (struct sched_domain_topology_level){
-               cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT)
-       };
+       x86_topology[i++] = SDTL_INIT(cpu_smt_mask, cpu_smt_flags, SMT);
 #endif
 #ifdef CONFIG_SCHED_CLUSTER
-       x86_topology[i++] = (struct sched_domain_topology_level){
-               cpu_clustergroup_mask, x86_cluster_flags, SD_INIT_NAME(CLS)
-       };
+       x86_topology[i++] = SDTL_INIT(cpu_clustergroup_mask, x86_cluster_flags, CLS);
 #endif
 #ifdef CONFIG_SCHED_MC
-       x86_topology[i++] = (struct sched_domain_topology_level){
-               cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC)
-       };
+       x86_topology[i++] = SDTL_INIT(cpu_coregroup_mask, x86_core_flags, MC);
 #endif
        /*
         * When there is NUMA topology inside the package skip the PKG domain
         * since the NUMA domains will auto-magically create the right spanning
         * domains based on the SLIT.
         */
-       if (!x86_has_numa_in_package) {
-               x86_topology[i++] = (struct sched_domain_topology_level){
-                       cpu_cpu_mask, x86_sched_itmt_flags, SD_INIT_NAME(PKG)
-               };
-       }
+       if (!x86_has_numa_in_package)
+               x86_topology[i++] = SDTL_INIT(cpu_cpu_mask, x86_sched_itmt_flags, PKG);
 
        /*
         * There must be one trailing NULL entry left.
         */
-       BUG_ON(i >= ARRAY_SIZE(x86_topology)-1);
+       BUG_ON(i >= ARRAY_SIZE(x86_topology) - 1);
 
        set_sched_topology(x86_topology);
 }
index e54e7fa76ba634b5a04edf7d07880fcccbf65fc5..0d5daaa277b755041e8e24dc59e0fe8d1f5e4544 100644 (file)
@@ -196,8 +196,8 @@ struct sched_domain_topology_level {
 extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
 extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);
 
-
-# define SD_INIT_NAME(type)            .name = #type
+#define SDTL_INIT(maskfn, flagsfn, dname) ((struct sched_domain_topology_level) \
+           { .mask = maskfn, .sd_flags = flagsfn, .name = #dname })
 
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
 extern void rebuild_sched_domains_energy(void);
index 8e06b1d22e91e1c2b56b1b0e1b39e50051811ccf..d01f5a49f2e7a7cdab9167b3d7c83492be770487 100644 (file)
@@ -1737,17 +1737,17 @@ sd_init(struct sched_domain_topology_level *tl,
  */
 static struct sched_domain_topology_level default_topology[] = {
 #ifdef CONFIG_SCHED_SMT
-       { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+       SDTL_INIT(cpu_smt_mask, cpu_smt_flags, SMT),
 #endif
 
 #ifdef CONFIG_SCHED_CLUSTER
-       { cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
+       SDTL_INIT(cpu_clustergroup_mask, cpu_cluster_flags, CLS),
 #endif
 
 #ifdef CONFIG_SCHED_MC
-       { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+       SDTL_INIT(cpu_coregroup_mask, cpu_core_flags, MC),
 #endif
-       { cpu_cpu_mask, SD_INIT_NAME(PKG) },
+       SDTL_INIT(cpu_cpu_mask, NULL, PKG),
        { NULL, },
 };
 
@@ -2008,23 +2008,15 @@ void sched_init_numa(int offline_node)
        /*
         * Add the NUMA identity distance, aka single NODE.
         */
-       tl[i++] = (struct sched_domain_topology_level){
-               .mask = sd_numa_mask,
-               .numa_level = 0,
-               SD_INIT_NAME(NODE)
-       };
+       tl[i++] = SDTL_INIT(sd_numa_mask, NULL, NODE);
 
        /*
         * .. and append 'j' levels of NUMA goodness.
         */
        for (j = 1; j < nr_levels; i++, j++) {
-               tl[i] = (struct sched_domain_topology_level){
-                       .mask = sd_numa_mask,
-                       .sd_flags = cpu_numa_flags,
-                       .flags = SDTL_OVERLAP,
-                       .numa_level = j,
-                       SD_INIT_NAME(NUMA)
-               };
+               tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA);
+               tl[i].numa_level = j;
+               tl[i].flags = SDTL_OVERLAP;
        }
 
        sched_domain_topology_saved = sched_domain_topology;