sched/balancing: Rename update_blocked_averages() => sched_balance_update_blocked_averages()
Author:     Ingo Molnar <mingo@kernel.org>
AuthorDate: Fri, 8 Mar 2024 11:18:15 +0000 (12:18 +0100)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 12 Mar 2024 11:00:00 +0000 (12:00 +0100)
Standardize scheduler load-balancing function names on the
sched_balance_() prefix.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-10-mingo@kernel.org
kernel/sched/fair.c
kernel/sched/pelt.c

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 96a81b2fa281f1bbc12f8fdbaa9fdec699c14190..95f7092043f3ff7ae3c8a16c48ddabf61118ba22 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9411,7 +9411,7 @@ static unsigned long task_h_load(struct task_struct *p)
 }
 #endif
 
-static void update_blocked_averages(int cpu)
+static void sched_balance_update_blocked_averages(int cpu)
 {
        bool decayed = false, done = true;
        struct rq *rq = cpu_rq(cpu);
@@ -12079,7 +12079,7 @@ static bool update_nohz_stats(struct rq *rq)
        if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
                return true;
 
-       update_blocked_averages(cpu);
+       sched_balance_update_blocked_averages(cpu);
 
        return rq->has_blocked_load;
 }
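
The guard in this hunk is a common kernel rate-limiting idiom: skip the expensive refresh unless a jiffies deadline has passed since the last one. A minimal userspace sketch of the same pattern, with hypothetical names and time(NULL) standing in for jiffies (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-ins: last_update plays the role of
 * rq->last_blocked_load_update_tick, time(NULL) the role of jiffies. */
static time_t last_update;

static bool refresh_blocked_stats_ratelimited(void)
{
	time_t now = time(NULL);

	if (now <= last_update)	/* analogous to !time_after(jiffies, tick) */
		return false;	/* statistics still fresh, skip the work */

	last_update = now;
	puts("refreshing blocked-load statistics");	/* the expensive part */
	return true;
}

int main(void)
{
	refresh_blocked_stats_ratelimited();	/* performs the refresh */
	refresh_blocked_stats_ratelimited();	/* same second: skipped  */
	return 0;
}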
@@ -12339,7 +12339,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
        raw_spin_rq_unlock(this_rq);
 
        t0 = sched_clock_cpu(this_cpu);
-       update_blocked_averages(this_cpu);
+       sched_balance_update_blocked_averages(this_cpu);
 
        rcu_read_lock();
        for_each_domain(this_cpu, sd) {
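
Note the t0 = sched_clock_cpu() timestamp taken just before the refresh: the newidle path measures how long the update takes so its cost can be charged to the balancing pass. A self-contained analogue using CLOCK_MONOTONIC (hypothetical workload; the kernel's cost accounting is more involved):

#include <stdio.h>
#include <time.h>

/* Userspace analogue of sched_clock_cpu(): a monotonic nanosecond clock. */
static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Hypothetical stand-in for the blocked-averages refresh. */
static void expensive_update(void)
{
	volatile long sink = 0;

	for (long i = 0; i < 1000000; i++)
		sink += i;
}

int main(void)
{
	long long t0 = now_ns();	/* analogous to t0 = sched_clock_cpu() */

	expensive_update();
	printf("update cost: %lld ns\n", now_ns() - t0);
	return 0;
}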
@@ -12431,7 +12431,7 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
                return;
 
        /* normal load balance */
-       update_blocked_averages(this_rq->cpu);
+       sched_balance_update_blocked_averages(this_rq->cpu);
        sched_balance_domains(this_rq, idle);
 }
 
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 63b6cf8982201d94f8173b678d387b6132a5feff..f80955ecdce6946dce817394773b5d1e95178e5f 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -209,7 +209,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
         * This means that weight will be 0 but not running for a sched_entity
         * but also for a cfs_rq if the latter becomes idle. As an example,
         * this happens during idle_balance() which calls
-        * update_blocked_averages().
+        * sched_balance_update_blocked_averages().
         *
         * Also see the comment in accumulate_sum().
         */
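
Context for the hunk above: sched_balance_update_blocked_averages() exists because blocked entities, which contribute load but are not running, must still have their PELT sums decayed. PELT's decay factor satisfies y^32 = 0.5, so a contribution halves roughly every 32 periods (~32 ms). A minimal floating-point sketch of that decay (the kernel uses fixed-point arithmetic and lookup tables instead):

#include <stdio.h>
#include <math.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* per-period factor, y^32 = 0.5 */
	double blocked_load = 1024.0;		/* initial contribution */

	for (int period = 1; period <= 96; period++) {
		blocked_load *= y;		/* decay only: no new running time */
		if (period % 32 == 0)
			printf("after %3d periods: %6.1f\n", period, blocked_load);
	}
	return 0;	/* prints ~512, ~256, ~128: halving every 32 periods */
}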