]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched/fair: Check the SD_ASYM_PACKING flag in sched_use_asym_prio()
author: Alex Shi <alexs@kernel.org>
Sat, 10 Feb 2024 11:39:22 +0000 (19:39 +0800)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 28 Feb 2024 14:43:17 +0000 (15:43 +0100)
sched_use_asym_prio() checks whether CPU priorities should be used. It
makes sense to check for the SD_ASYM_PACKING flag inside the function.
Since both sched_asym() and sched_group_asym() use sched_use_asym_prio(),
remove the now superfluous checks for the flag in various places.

Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20240210113924.1130448-4-alexs@kernel.org
kernel/sched/fair.c

index 475e2ca66b6324fa1d4d07255650accefba1a5cc..39781a666c087efe5c0baf168280ee0000610d86 100644 (file)
@@ -9744,6 +9744,9 @@ group_type group_classify(unsigned int imbalance_pct,
  */
 static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
 {
+       if (!(sd->flags & SD_ASYM_PACKING))
+               return false;
+
        if (!sched_smt_active())
                return true;
 
@@ -9937,11 +9940,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
        sgs->group_weight = group->group_weight;
 
        /* Check if dst CPU is idle and preferred to this group */
-       if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
-           env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
-           sched_group_asym(env, sgs, group)) {
+       if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
+           sched_group_asym(env, sgs, group))
                sgs->group_asym_packing = 1;
-       }
 
        /* Check for loaded SMT group to be balanced to dst CPU */
        if (!local_group && smt_balance(env, sgs, group))
@@ -11024,9 +11025,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                 * If balancing between cores, let lower priority CPUs help
                 * SMT cores with more than one busy sibling.
                 */
-               if ((env->sd->flags & SD_ASYM_PACKING) &&
-                   sched_asym(env->sd, i, env->dst_cpu) &&
-                   nr_running == 1)
+               if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1)
                        continue;
 
                switch (env->migration_type) {
@@ -11122,8 +11121,7 @@ asym_active_balance(struct lb_env *env)
         * the lower priority @env::dst_cpu help it. Do not follow
         * CPU priority.
         */
-       return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
-              sched_use_asym_prio(env->sd, env->dst_cpu) &&
+       return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) &&
               (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
                !sched_use_asym_prio(env->sd, env->src_cpu));
 }