sched/fair: Proportional newidle balance
author    Peter Zijlstra <peterz@infradead.org>
          Fri, 7 Nov 2025 16:01:31 +0000 (17:01 +0100)
committer Peter Zijlstra <peterz@infradead.org>
          Mon, 17 Nov 2025 16:13:16 +0000 (17:13 +0100)
Add a randomized algorithm that runs newidle balancing proportional to
its success rate.
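
As an illustration of the idea (a standalone sketch, not code from the patch):
keep a per-domain success ratio scaled to 1024, roll a die on every newidle
opportunity, attempt the balance only with probability proportional to that
ratio, and weight each attempt that does run by the inverse of its sampling
probability so the ratio keeps tracking the true success rate. All names below
(ni_stats, ni_should_balance, ...) are made up for the sketch; the actual
kernel changes follow further down.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ni_stats {
	unsigned int call;	/* opportunities seen (half window after a fold) */
	unsigned int success;	/* weighted successful pulls */
	unsigned int ratio;	/* successes per 1024 opportunities */
};

static void ni_account(struct ni_stats *st, unsigned int success)
{
	st->call++;
	st->success += success;

	if (st->call >= 1024) {		/* fold the window, keep decayed history */
		st->ratio = st->success;
		st->call /= 2;
		st->success /= 2;
	}
}

/* Decide whether to attempt a balance; on true, set the compensation weight. */
static bool ni_should_balance(struct ni_stats *st, unsigned int *weight)
{
	unsigned int w = 1 + st->ratio;	/* run with probability ~(1 + ratio)/1024 */

	if ((rand() % 1024) > w) {
		ni_account(st, 0);	/* a skipped opportunity still counts as a call */
		return false;
	}
	*weight = (1024 + w / 2) / w;	/* ~1/probability, rounded to nearest */
	return true;
}

int main(void)
{
	/* Seed at a 50% success rate, as sd_init() does in the patch. */
	struct ni_stats st = { .call = 512, .success = 256, .ratio = 512 };

	for (int i = 0; i < 1000000; i++) {
		unsigned int weight;

		if (!ni_should_balance(&st, &weight))
			continue;
		/* Pretend the actual balance pulls a task 10% of the time. */
		ni_account(&st, (rand() % 10) == 0 ? weight : 0);
	}
	printf("estimated success rate: %u/1024\n", st.ratio);
	return 0;
}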

This improves schbench significantly:

 6.18-rc4: 2.22 Mrps/s
 6.18-rc4+revert: 2.04 Mrps/s
 6.18-rc4+revert+random: 2.18 Mrps/s

Conversely, per Adam Li, this affects SpecJBB slightly, reducing it by 1% relative to the reverted baseline:

 6.17: -6%
 6.17+revert:  0%
 6.17+revert+random: -1%

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Chris Mason <clm@meta.com>
Link: https://lkml.kernel.org/r/6825c50d-7fa7-45d8-9b81-c6e7e25738e2@meta.com
Link: https://patch.msgid.link/20251107161739.770122091@infradead.org
include/linux/sched/topology.h
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/sched.h
kernel/sched/topology.c

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index bbcfdf12aa6e574c613153c377bf051c7640200d..45c0022b91ced38e49c4fd37610bbc2c9c967f89 100644
@@ -92,6 +92,9 @@ struct sched_domain {
        unsigned int nr_balance_failed; /* initialise to 0 */
 
        /* idle_balance() stats */
+       unsigned int newidle_call;
+       unsigned int newidle_success;
+       unsigned int newidle_ratio;
        u64 max_newidle_lb_cost;
        unsigned long last_decay_max_lb_cost;
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 699db3f46df6469ba59fe6b91450330f2e751aea..9f10cfbdc228d84cf8e6d221e7d2bc95475cfe37 100644
@@ -121,6 +121,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
 
 #ifdef CONFIG_SCHED_PROXY_EXEC
 DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
@@ -8489,6 +8490,8 @@ void __init sched_init_smp(void)
 {
        sched_init_numa(NUMA_NO_NODE);
 
+       prandom_init_once(&sched_rnd_state);
+
        /*
         * There's no userspace yet to cause hotplug operations; hence all the
         * CPU masks are stable and all blatant races in the below code cannot
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index abcbb67dd7851b4c3355ceaea708ce41ed4f1b01..1855975b82485c5aa4eb0f0f387cb437da18a50d 100644
@@ -12224,11 +12224,27 @@ void update_max_interval(void)
        max_load_balance_interval = HZ*num_online_cpus()/10;
 }
 
-static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
+static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
+{
+       sd->newidle_call++;
+       sd->newidle_success += success;
+
+       if (sd->newidle_call >= 1024) {
+               sd->newidle_ratio = sd->newidle_success;
+               sd->newidle_call /= 2;
+               sd->newidle_success /= 2;
+       }
+}
+
+static inline bool
+update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
 {
        unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
        unsigned long now = jiffies;
 
+       if (cost)
+               update_newidle_stats(sd, success);
+
        if (cost > sd->max_newidle_lb_cost) {
                /*
                 * Track max cost of a domain to make sure to not delay the
@@ -12276,7 +12292,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
                 * Decay the newidle max times here because this is a regular
                 * visit to all the domains.
                 */
-               need_decay = update_newidle_cost(sd, 0);
+               need_decay = update_newidle_cost(sd, 0, 0);
                max_cost += sd->max_newidle_lb_cost;
 
                /*
@@ -12912,6 +12928,22 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
                        break;
 
                if (sd->flags & SD_BALANCE_NEWIDLE) {
+                       unsigned int weight = 1;
+
+                       if (sched_feat(NI_RANDOM)) {
+                               /*
+                                * Throw a 1k-sided die and only run
+                                * newidle_balance according to the success
+                                * rate.
+                                */
+                               u32 d1k = sched_rng() % 1024;
+                               weight = 1 + sd->newidle_ratio;
+                               if (d1k > weight) {
+                                       update_newidle_stats(sd, 0);
+                                       continue;
+                               }
+                               weight = (1024 + weight/2) / weight;
+                       }
 
                        pulled_task = sched_balance_rq(this_cpu, this_rq,
                                                   sd, CPU_NEWLY_IDLE,
@@ -12919,10 +12951,14 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 
                        t1 = sched_clock_cpu(this_cpu);
                        domain_cost = t1 - t0;
-                       update_newidle_cost(sd, domain_cost);
-
                        curr_cost += domain_cost;
                        t0 = t1;
+
+                       /*
+                        * Track max cost of a domain to make sure to not delay the
+                        * next wakeup on the CPU.
+                        */
+                       update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
                }
 
                /*
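
A worked example of the weighting above, using an illustrative value that is
not from the patch: suppose a domain has settled at newidle_ratio = 102, i.e.
roughly a 10% success rate.

  weight = 1 + 102 = 103            /* balance runs when d1k <= 103, ~10% of calls */
  weight = (1024 + 103/2) / 103
         = 1075 / 103 = 10          /* integer division */

Each pull that does happen is then accounted as 10 successes, standing in for
the roughly 10 opportunities the die skipped, so the tracked ratio keeps
approximating what unconditional newidle balancing would have measured.
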
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 0607def744af6f1aefa120dc4474ac311814372c..980d92bab8abfaaac610b40b71b888c08a7aa155 100644
@@ -121,3 +121,8 @@ SCHED_FEAT(WA_BIAS, true)
 SCHED_FEAT(UTIL_EST, true)
 
 SCHED_FEAT(LATENCY_WARN, false)
+
+/*
+ * Do newidle balancing proportional to its success rate using randomization.
+ */
+SCHED_FEAT(NI_RANDOM, true)
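
Being a SCHED_FEAT, the randomization can be toggled at runtime for comparison
on kernels that expose the scheduler features file in debugfs (the path below
assumes the usual debugfs mount point):

  echo NO_NI_RANDOM > /sys/kernel/debug/sched/features    # disable the randomization
  echo NI_RANDOM    > /sys/kernel/debug/sched/features    # restore the default
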
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index def9ab7b59d428430ecdb012713571ab0604b527..b419a4d98461c20235660ed15e583b2323e63ed9 100644
@@ -5,6 +5,7 @@
 #ifndef _KERNEL_SCHED_SCHED_H
 #define _KERNEL_SCHED_SCHED_H
 
+#include <linux/prandom.h>
 #include <linux/sched/affinity.h>
 #include <linux/sched/autogroup.h>
 #include <linux/sched/cpufreq.h>
@@ -1348,6 +1349,12 @@ static inline bool is_migration_disabled(struct task_struct *p)
 }
 
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
+
+static inline u32 sched_rng(void)
+{
+       return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
+}
 
 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
 #define this_rq()              this_cpu_ptr(&runqueues)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 711076aa4980184ca88ca28e0a21e635fb79794d..cf643a5ddedd2a1eb0aa4a4d6b48ba823bb1330a 100644
@@ -1669,6 +1669,12 @@ sd_init(struct sched_domain_topology_level *tl,
 
                .last_balance           = jiffies,
                .balance_interval       = sd_weight,
+
+               /* 50% success rate */
+               .newidle_call           = 512,
+               .newidle_success        = 256,
+               .newidle_ratio          = 512,
+
                .max_newidle_lb_cost    = 0,
                .last_decay_max_lb_cost = jiffies,
                .child                  = child,
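
For reference, the values above seed every domain as if it had just folded a
stats window at a 50% success rate; working the fair.c weighting through with
that seed (the derived numbers are illustrative, not from the patch):

  newidle_ratio = 512                     /* 512/1024 = 50% */
  weight        = 1 + 512 = 513           /* balance runs when d1k <= 513, ~50% of calls */
  weight        = (1024 + 513/2) / 513
                = 1280 / 513 = 2          /* each pull is accounted as 2 successes */

After another 512 calls, newidle_call reaches 1024, newidle_ratio is refreshed
from the accumulated weighted successes, and both counters are halved again.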