cpufreq_schedutil: Refactor sugov_cpu_is_busy()
author	Tejun Heo <tj@kernel.org>
	Fri, 21 Jun 2024 22:37:03 +0000 (12:37 -1000)
committer	Tejun Heo <tj@kernel.org>
	Fri, 21 Jun 2024 22:37:03 +0000 (12:37 -1000)
sugov_cpu_is_busy() is used to avoid decreasing the performance level while
the CPU is busy, and is called by sugov_update_single_freq() and
sugov_update_single_perf(). Both callers repeat the same pattern: first
test for uclamp, then for busyness. Let's refactor so that the tests
aren't repeated.
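
For reference, the duplicated guard both callers currently open-code looks
like this (condensed from the sugov_update_single_freq() hunk below;
sugov_update_single_perf() repeats the same uclamp-then-busy test, only
comparing sg_cpu->util against prev_util instead):

	/*
	 * Skip the hold when the rq is capped by uclamp_max; otherwise
	 * keep the previous frequency while the CPU is still busy.
	 */
	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
	    !sg_policy->need_freq_update)
		next_f = sg_policy->next_freq;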

The new helper is named sugov_hold_freq() and tests both the uclamp
exception and CPU busyness. No functional changes. This will make adding
more exception conditions easier.
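
With the refactor, both call sites reduce to a single helper test
(condensed from the hunks below):

	/* in sugov_update_single_freq() */
	if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq &&
	    !sg_policy->need_freq_update)
		next_f = sg_policy->next_freq;

	/* in sugov_update_single_perf() */
	if (sugov_hold_freq(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;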

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Vernet <dvernet@meta.com>
Reviewed-by: Christian Loehle <christian.loehle@arm.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
kernel/sched/cpufreq_schedutil.c

index eece6244f9d2fea301f5523ef1d7d6779f0d4625..972b7dd65af2d20b1d1731954de27cec73783d08 100644 (file)
@@ -325,16 +325,27 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
-static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
+static bool sugov_hold_freq(struct sugov_cpu *sg_cpu)
 {
-       unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
-       bool ret = idle_calls == sg_cpu->saved_idle_calls;
+       unsigned long idle_calls;
+       bool ret;
+
+       /* if capped by uclamp_max, always update to be in compliance */
+       if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)))
+               return false;
+
+       /*
+        * Maintain the frequency if the CPU has not been idle recently, as
+        * reduction is likely to be premature.
+        */
+       idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
+       ret = idle_calls == sg_cpu->saved_idle_calls;
 
        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
 }
 #else
-static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
@@ -382,14 +393,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
                return;
 
        next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
-       /*
-        * Do not reduce the frequency if the CPU has not been idle
-        * recently, as the reduction is likely to be premature then.
-        *
-        * Except when the rq is capped by uclamp_max.
-        */
-       if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
-           sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
+
+       if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq &&
            !sg_policy->need_freq_update) {
                next_f = sg_policy->next_freq;
 
@@ -436,14 +441,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
        if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
                return;
 
-       /*
-        * Do not reduce the target performance level if the CPU has not been
-        * idle recently, as the reduction is likely to be premature then.
-        *
-        * Except when the rq is capped by uclamp_max.
-        */
-       if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
-           sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
+       if (sugov_hold_freq(sg_cpu) && sg_cpu->util < prev_util)
                sg_cpu->util = prev_util;
 
        cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min,