sched/fair: Proportional newidle balance
author     Peter Zijlstra <peterz@infradead.org>
           Wed, 3 Dec 2025 11:25:52 +0000 (11:25 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 11 Jan 2026 14:19:26 +0000 (15:19 +0100)
commit 33cf66d88306663d16e4759e9d24766b0aaa2e17 upstream.

Add a randomized algorithm that runs newidle balancing proportional to
its success rate.

This improves schbench significantly:

 6.18-rc4:               2.22 Mrps/s
 6.18-rc4+revert:        2.04 Mrps/s
 6.18-rc4+revert+random: 2.18 Mrps/s

Conversely, per Adam Li, this affects SpecJBB slightly, reducing it by 1%
(relative to the reverted baseline):

 6.17:               -6%
 6.17+revert:         0%
 6.17+revert+random: -1%
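
Before the hunks, a minimal user-space sketch of the gating described above
(an editorial illustration, not part of the patch). Each sched_domain keeps a
sliding window of roughly 1024 newidle attempts; newidle_ratio snapshots how
many of them, weighted, ended in a successful pull, and a later attempt only
runs the balance when a 1024-sided dice roll lands at or below
1 + newidle_ratio. Pulls that do happen are counted with weight roughly
1024/(1 + newidle_ratio), compensating for the skipped attempts so the ratio
keeps tracking the real success rate. The field names mirror the patch;
p_success and the rand()-based dice are assumptions of the simulation,
standing in for real balance outcomes and the per-CPU prandom state.

/*
 * Editorial sketch, not part of the patch: a user-space model of the
 * proportional gating added to sched_balance_newidle() below.  The
 * field names (newidle_call/_success/_ratio) and the d1k roll mirror
 * the kernel hunks; rand() stands in for the per-CPU prandom state
 * and p_success for the real chance that a balance pulls a task.
 */
#include <stdio.h>
#include <stdlib.h>

struct sd_stats {
	unsigned int newidle_call;	/* attempts seen in the current window */
	unsigned int newidle_success;	/* weighted successful pulls           */
	unsigned int newidle_ratio;	/* last window's successes per 1024    */
};

/* Same accounting as the kernel's update_newidle_stats(): decay at 1024. */
static void update_newidle_stats(struct sd_stats *sd, unsigned int success)
{
	sd->newidle_call++;
	sd->newidle_success += success;

	if (sd->newidle_call >= 1024) {
		sd->newidle_ratio = sd->newidle_success;
		sd->newidle_call /= 2;
		sd->newidle_success /= 2;
	}
}

int main(void)
{
	const double p_success = 0.25;	/* assumed true pull rate */
	/* 50% seed, as in the sd_init() hunk below */
	struct sd_stats sd = { .newidle_call = 512, .newidle_success = 256,
			       .newidle_ratio = 512 };
	unsigned long ran = 0, iters = 1000000;

	srand(1);
	for (unsigned long i = 0; i < iters; i++) {
		unsigned int weight = 1 + sd.newidle_ratio;
		unsigned int d1k = rand() % 1024;	/* 1k sided dice */

		if (d1k > weight) {
			/* skip the balance, but record the attempt */
			update_newidle_stats(&sd, 0);
			continue;
		}
		/* weight successes up to compensate for skipped attempts */
		weight = (1024 + weight / 2) / weight;
		ran++;

		int pulled = ((double)rand() / RAND_MAX) < p_success;
		update_newidle_stats(&sd, weight * !!pulled);
	}
	printf("ran newidle balance on %.1f%% of attempts (true pull rate %.0f%%)\n",
	       100.0 * ran / iters, 100.0 * p_success);
	return 0;
}

With the constants above, the printed run fraction settles around 25%, i.e.
newidle balancing is attempted roughly in proportion to how often it actually
pulls a task; the 512/256 seed matches the sd_init() change below, which
starts every domain at an assumed 50% success rate.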

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Chris Mason <clm@meta.com>
Link: https://lkml.kernel.org/r/6825c50d-7fa7-45d8-9b81-c6e7e25738e2@meta.com
Link: https://patch.msgid.link/20251107161739.770122091@infradead.org
[ Ajay: Modified to apply on v6.1 ]
Signed-off-by: Ajay Kaher <ajay.kaher@broadcom.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/sched/topology.h
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/sched.h
kernel/sched/topology.c

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 816df6cc444e1c790c4d6a387428bc334a9a43ef..caeceec3eb0b923352e7b90506fc1c27c6a6a7d3 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -106,6 +106,9 @@ struct sched_domain {
        unsigned int nr_balance_failed; /* initialise to 0 */
 
        /* idle_balance() stats */
+       unsigned int newidle_call;
+       unsigned int newidle_success;
+       unsigned int newidle_ratio;
        u64 max_newidle_lb_cost;
        unsigned long last_decay_max_lb_cost;
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9b01fdceb62200e9b68e5bbb8efd80b5d2ff2f7d..09ffe1b96643108543e79a43eb5e8a3d00720993 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -112,6 +112,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
 
 #ifdef CONFIG_SCHED_DEBUG
 /*
@@ -9632,6 +9633,8 @@ void __init sched_init_smp(void)
 {
        sched_init_numa(NUMA_NO_NODE);
 
+       prandom_init_once(&sched_rnd_state);
+
        /*
         * There's no userspace yet to cause hotplug operations; hence all the
         * CPU masks are stable and all blatant races in the below code cannot
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2f296e2af5e4ad6d777dfbb53f674fc39da9f361..9f7c9083e9bf9eee1b81550d27a245eebd84eba9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10935,11 +10935,27 @@ void update_max_interval(void)
        max_load_balance_interval = HZ*num_online_cpus()/10;
 }
 
-static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
+static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
+{
+       sd->newidle_call++;
+       sd->newidle_success += success;
+
+       if (sd->newidle_call >= 1024) {
+               sd->newidle_ratio = sd->newidle_success;
+               sd->newidle_call /= 2;
+               sd->newidle_success /= 2;
+       }
+}
+
+static inline bool
+update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
 {
        unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
        unsigned long now = jiffies;
 
+       if (cost)
+               update_newidle_stats(sd, success);
+
        if (cost > sd->max_newidle_lb_cost) {
                /*
                 * Track max cost of a domain to make sure to not delay the
@@ -10987,7 +11003,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
                 * Decay the newidle max times here because this is a regular
                 * visit to all the domains.
                 */
-               need_decay = update_newidle_cost(sd, 0);
+               need_decay = update_newidle_cost(sd, 0, 0);
                max_cost += sd->max_newidle_lb_cost;
 
                /*
@@ -11621,6 +11637,22 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
                        break;
 
                if (sd->flags & SD_BALANCE_NEWIDLE) {
+                       unsigned int weight = 1;
+
+                       if (sched_feat(NI_RANDOM)) {
+                               /*
+                                * Throw a 1k sided dice; and only run
+                                * newidle_balance according to the success
+                                * rate.
+                                */
+                               u32 d1k = sched_rng() % 1024;
+                               weight = 1 + sd->newidle_ratio;
+                               if (d1k > weight) {
+                                       update_newidle_stats(sd, 0);
+                                       continue;
+                               }
+                               weight = (1024 + weight/2) / weight;
+                       }
 
                        pulled_task = load_balance(this_cpu, this_rq,
                                                   sd, CPU_NEWLY_IDLE,
@@ -11628,10 +11660,14 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 
                        t1 = sched_clock_cpu(this_cpu);
                        domain_cost = t1 - t0;
-                       update_newidle_cost(sd, domain_cost);
-
                        curr_cost += domain_cost;
                        t0 = t1;
+
+                       /*
+                        * Track max cost of a domain to make sure to not delay the
+                        * next wakeup on the CPU.
+                        */
+                       update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
                }
 
                /*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index ee7f23c76bd33692647627426ee30e503ccbf95a..0115183ee74e927c836a7713b07488bd7069214d 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -99,5 +99,10 @@ SCHED_FEAT(UTIL_EST_FASTUP, true)
 
 SCHED_FEAT(LATENCY_WARN, false)
 
+/*
+ * Do newidle balancing proportional to its success rate using randomization.
+ */
+SCHED_FEAT(NI_RANDOM, true)
+
 SCHED_FEAT(ALT_PERIOD, true)
 SCHED_FEAT(BASE_SLICE, true)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 95afded0b174f700af41c43064d2ec778c209352..6f66a9b1aaa982187fc03150136ee2b04171ee9e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -5,6 +5,7 @@
 #ifndef _KERNEL_SCHED_SCHED_H
 #define _KERNEL_SCHED_SCHED_H
 
+#include <linux/prandom.h>
 #include <linux/sched/affinity.h>
 #include <linux/sched/autogroup.h>
 #include <linux/sched/cpufreq.h>
@@ -1190,6 +1191,12 @@ static inline bool is_migration_disabled(struct task_struct *p)
 }
 
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
+
+static inline u32 sched_rng(void)
+{
+       return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
+}
 
 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
 #define this_rq()              this_cpu_ptr(&runqueues)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d404b5d2d842e620a54633ba2d1eb9b3ef384162..9d6ec8311167d92f6505520d64c4deb5e90a169e 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1584,6 +1584,12 @@ sd_init(struct sched_domain_topology_level *tl,
 
                .last_balance           = jiffies,
                .balance_interval       = sd_weight,
+
+               /* 50% success rate */
+               .newidle_call           = 512,
+               .newidle_success        = 256,
+               .newidle_ratio          = 512,
+
                .max_newidle_lb_cost    = 0,
                .last_decay_max_lb_cost = jiffies,
                .child                  = child,