sched/fair: Add NOHZ balancer flag for nohz.next_balance updates
author     Valentin Schneider <valentin.schneider@arm.com>
           Mon, 23 Aug 2021 11:16:59 +0000 (12:16 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 14 Dec 2024 18:51:43 +0000 (19:51 +0100)
[ Upstream commit efd984c481abb516fab8bafb25bf41fd9397a43c ]

A following patch will trigger NOHZ idle balances as a means to update
nohz.next_balance. Vincent noted that blocked load updates can have
non-negligible overhead, which should be avoided if the intent is to only
update nohz.next_balance.

Add a new NOHZ balance kick flag, NOHZ_NEXT_KICK. Gate the NOHZ blocked
load update behind the presence of NOHZ_STATS_KICK - currently all NOHZ
balance kicks have the NOHZ_STATS_KICK flag set, so no change in
behaviour is expected.

Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210823111700.2842997-2-valentin.schneider@arm.com
Stable-dep-of: ff47a0acfcce ("sched/fair: Check idle_cpu() before need_resched() to detect ilb CPU turning busy")
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/sched/fair.c
kernel/sched/sched.h
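
Before the diffs, a minimal standalone sketch (userspace C, with the kernel's
BIT() macro hand-rolled) of the flag layout this patch arrives at. It also
shows why the kick sites in nohz_balancer_kick() below switch from
NOHZ_KICK_MASK to an explicit NOHZ_STATS_KICK | NOHZ_BALANCE_KICK: after this
patch the mask also contains NOHZ_NEXT_KICK, which those sites do not mean to
set.

#include <stdio.h>

#define BIT(nr)			(1UL << (nr))

#define NOHZ_BALANCE_KICK	BIT(0)	/* run rebalance_domains() */
#define NOHZ_STATS_KICK		BIT(1)	/* update blocked load */
/* BIT(2) is NOHZ_NEWILB_KICK, not needed for this sketch */
#define NOHZ_NEXT_KICK		BIT(3)	/* update nohz.next_balance */

#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)

int main(void)
{
	unsigned long explicit_flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;

	/* The old `flags = NOHZ_KICK_MASK` would now also request a
	 * nohz.next_balance-only update on every kick. */
	printf("NOHZ_KICK_MASK           = %#lx\n", NOHZ_KICK_MASK);
	printf("stats|balance (explicit) = %#lx\n", explicit_flags);
	printf("mask includes NOHZ_NEXT_KICK: %s\n",
	       (NOHZ_KICK_MASK & NOHZ_NEXT_KICK) ? "yes" : "no");
	return 0;
}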

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 68793b50adad7d3c4be1147d3f9c6f5b5f3f1773..6e1a6d6285d12bf3799e741ee9fa7f1c537e5dd1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10764,7 +10764,7 @@ static void nohz_balancer_kick(struct rq *rq)
                goto out;
 
        if (rq->nr_running >= 2) {
-               flags = NOHZ_KICK_MASK;
+               flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
                goto out;
        }
 
@@ -10778,7 +10778,7 @@ static void nohz_balancer_kick(struct rq *rq)
                 * on.
                 */
                if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
-                       flags = NOHZ_KICK_MASK;
+                       flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
                        goto unlock;
                }
        }
@@ -10792,7 +10792,7 @@ static void nohz_balancer_kick(struct rq *rq)
                 */
                for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
                        if (sched_asym_prefer(i, cpu)) {
-                               flags = NOHZ_KICK_MASK;
+                               flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
                                goto unlock;
                        }
                }
@@ -10805,7 +10805,7 @@ static void nohz_balancer_kick(struct rq *rq)
                 * to run the misfit task on.
                 */
                if (check_misfit_status(rq, sd)) {
-                       flags = NOHZ_KICK_MASK;
+                       flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
                        goto unlock;
                }
 
@@ -10832,7 +10832,7 @@ static void nohz_balancer_kick(struct rq *rq)
                 */
                nr_busy = atomic_read(&sds->nr_busy_cpus);
                if (nr_busy > 1) {
-                       flags = NOHZ_KICK_MASK;
+                       flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
                        goto unlock;
                }
        }
@@ -10994,7 +10994,8 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
         * setting the flag, we are sure to not clear the state and not
         * check the load of an idle cpu.
         */
-       WRITE_ONCE(nohz.has_blocked, 0);
+       if (flags & NOHZ_STATS_KICK)
+               WRITE_ONCE(nohz.has_blocked, 0);
 
        /*
         * Ensures that if we miss the CPU, we must see the has_blocked
@@ -11016,13 +11017,15 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
                 * balancing owner will pick it up.
                 */
                if (need_resched()) {
-                       has_blocked_load = true;
+                       if (flags & NOHZ_STATS_KICK)
+                               has_blocked_load = true;
                        goto abort;
                }
 
                rq = cpu_rq(balance_cpu);
 
-               has_blocked_load |= update_nohz_stats(rq);
+               if (flags & NOHZ_STATS_KICK)
+                       has_blocked_load |= update_nohz_stats(rq);
 
                /*
                 * If time for next balance is due,
@@ -11053,8 +11056,9 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
        if (likely(update_next_balance))
                nohz.next_balance = next_balance;
 
-       WRITE_ONCE(nohz.next_blocked,
-               now + msecs_to_jiffies(LOAD_AVG_PERIOD));
+       if (flags & NOHZ_STATS_KICK)
+               WRITE_ONCE(nohz.next_blocked,
+                          now + msecs_to_jiffies(LOAD_AVG_PERIOD));
 
 abort:
        /* There is still blocked load, enable periodic update */
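
Condensing the fair.c hunks above: every blocked-load side effect in
_nohz_idle_balance() is now gated on NOHZ_STATS_KICK, while the
nohz.next_balance bookkeeping still runs for every kick. A self-contained
userspace sketch of that control flow follows; the *_stub names and the fixed
four-CPU loop are stand-ins for the kernel's per-CPU machinery, not kernel API.

#include <stdbool.h>
#include <stdio.h>

#define NOHZ_BALANCE_KICK	(1U << 0)
#define NOHZ_STATS_KICK		(1U << 1)
#define NOHZ_NEXT_KICK		(1U << 3)

/* Stand-in for update_nohz_stats(): the expensive part being gated. */
static bool update_nohz_stats_stub(int cpu)
{
	printf("  cpu%d: blocked-load update\n", cpu);
	return false;		/* pretend no blocked load remains */
}

static void nohz_idle_balance_sketch(unsigned int flags)
{
	bool has_blocked_load = false;
	int cpu;

	printf("kick flags=%#x\n", flags);
	for (cpu = 0; cpu < 4; cpu++) {
		/* Pay for the stats update only when it was asked for. */
		if (flags & NOHZ_STATS_KICK)
			has_blocked_load |= update_nohz_stats_stub(cpu);

		/* nohz.next_balance tracking would happen here for
		 * every kick type, including a bare NOHZ_NEXT_KICK. */
	}

	printf("  blocked load left: %s\n", has_blocked_load ? "yes" : "no");
}

int main(void)
{
	nohz_idle_balance_sketch(NOHZ_STATS_KICK | NOHZ_BALANCE_KICK);
	nohz_idle_balance_sketch(NOHZ_NEXT_KICK);	/* cheap path */
	return 0;
}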
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 48bcc1876df831b593293745087f6f335ca5d0e5..6fc16bc13abf52a03a9a4777813be43804e92238 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2739,12 +2739,18 @@ extern void cfs_bandwidth_usage_dec(void);
 #define NOHZ_BALANCE_KICK_BIT  0
 #define NOHZ_STATS_KICK_BIT    1
 #define NOHZ_NEWILB_KICK_BIT   2
+#define NOHZ_NEXT_KICK_BIT     3
 
+/* Run rebalance_domains() */
 #define NOHZ_BALANCE_KICK      BIT(NOHZ_BALANCE_KICK_BIT)
+/* Update blocked load */
 #define NOHZ_STATS_KICK                BIT(NOHZ_STATS_KICK_BIT)
+/* Update blocked load when entering idle */
 #define NOHZ_NEWILB_KICK       BIT(NOHZ_NEWILB_KICK_BIT)
+/* Update nohz.next_balance */
+#define NOHZ_NEXT_KICK         BIT(NOHZ_NEXT_KICK_BIT)
 
-#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
+#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)
 
 #define nohz_flags(cpu)        (&cpu_rq(cpu)->nohz_flags)
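
How these flags travel is worth one more sketch: a kicker ORs them into the
target CPU's nohz_flags and the idle-load-balance CPU later consumes them in
one atomic step, loosely modelled on kick_ilb() and nohz_idle_balance(). This
userspace version uses C11 stdatomic and an atomic_exchange() where the kernel
actually uses atomic_fetch_andnot() against NOHZ_KICK_MASK; the *_sketch names
are stand-ins, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

#define NOHZ_BALANCE_KICK	(1U << 0)
#define NOHZ_STATS_KICK		(1U << 1)
#define NOHZ_NEXT_KICK		(1U << 3)
#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)

/* Stand-in for the ilb CPU's rq->nohz_flags word. */
static atomic_uint nohz_flags_sketch;

static void kick_ilb_sketch(unsigned int flags)
{
	/* Several kicks may be merged before the ilb CPU gets to run. */
	atomic_fetch_or(&nohz_flags_sketch, flags);
}

static void ilb_run_sketch(void)
{
	unsigned int flags =
		atomic_exchange(&nohz_flags_sketch, 0) & NOHZ_KICK_MASK;

	if (flags & NOHZ_STATS_KICK)
		printf("update blocked load\n");
	if (flags & NOHZ_BALANCE_KICK)
		printf("run rebalance_domains()\n");
	if (flags & NOHZ_NEXT_KICK)
		printf("refresh nohz.next_balance\n");
}

int main(void)
{
	kick_ilb_sketch(NOHZ_NEXT_KICK);
	kick_ilb_sketch(NOHZ_STATS_KICK | NOHZ_BALANCE_KICK);
	ilb_run_sketch();	/* one pass serves all merged kicks */
	return 0;
}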