git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched/topology: Compute sd_weight considering cpuset partitions
author: K Prateek Nayak <kprateek.nayak@amd.com>
Thu, 12 Mar 2026 04:44:26 +0000 (04:44 +0000)
committer: Peter Zijlstra <peterz@infradead.org>
Wed, 18 Mar 2026 08:06:47 +0000 (09:06 +0100)
The "sd_weight" used for calculating the load balancing interval, and
its limits, considers the span weight of the entire topology level
without accounting for cpuset partitions.

For example, consider a large system of 128 CPUs divided into 8 * 16-CPU
partitions, which is typical when deploying virtual machines:

  [                      PKG Domain: 128CPUs                      ]

  [Partition0: 16CPUs][Partition1: 16CPUs] ... [Partition7: 16CPUs]

Although each partition only contains 16 CPUs, the load balancing
interval is set to a minimum of 128 jiffies based on the span of the
entire 128-CPU domain, which can lead to longer-lasting imbalances
within a partition even though balancing within it is cheaper with only
16 CPUs.

Compute the "sd_weight" after computing the "sd_span" considering the
cpu_map covered by the partition, and set the load balancing interval,
and its limits accordingly.

For the above example, the balancing intervals for the partitions PKG
domain changes as follows:

                  before   after
balance_interval   128      16
min_interval       128      16
max_interval       256      32

Intervals are now proportional to the CPUs in the partitioned domain as
was intended by the original formula.

Fixes: cb83b629bae03 ("sched/numa: Rewrite the CONFIG_NUMA sched domain support")
Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Reviewed-by: Chen Yu <yu.c.chen@intel.com>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://patch.msgid.link/20260312044434.1974-2-kprateek.nayak@amd.com
kernel/sched/topology.c

index 061f8c85f5552ce460d79dc60690af68db29ca60..79bab80af8f2bbec3544b1f36dd89a2643068ecf 100644 (file)
@@ -1645,13 +1645,17 @@ sd_init(struct sched_domain_topology_level *tl,
        struct cpumask *sd_span;
        u64 now = sched_clock();
 
-       sd_weight = cpumask_weight(tl->mask(tl, cpu));
+       sd_span = sched_domain_span(sd);
+       cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));
+       sd_weight = cpumask_weight(sd_span);
+       sd_id = cpumask_first(sd_span);
 
        if (tl->sd_flags)
                sd_flags = (*tl->sd_flags)();
        if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
-                       "wrong sd_flags in topology description\n"))
+                     "wrong sd_flags in topology description\n"))
                sd_flags &= TOPOLOGY_SD_FLAGS;
+       sd_flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
 
        *sd = (struct sched_domain){
                .min_interval           = sd_weight,
@@ -1689,12 +1693,6 @@ sd_init(struct sched_domain_topology_level *tl,
                .name                   = tl->name,
        };
 
-       sd_span = sched_domain_span(sd);
-       cpumask_and(sd_span, cpu_map, tl->mask(tl, cpu));
-       sd_id = cpumask_first(sd_span);
-
-       sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
-
        WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
                  (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
                  "CPU capacity asymmetry not supported on SMT\n");