sched/smt: Introduce sched_smt_present_inc/dec() helper
author Yang Yingliang <yangyingliang@huawei.com>
Wed, 3 Jul 2024 03:16:07 +0000 (11:16 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 14 Aug 2024 13:34:34 +0000 (15:34 +0200)
commit 31b164e2e4af84d08d2498083676e7eeaa102493 upstream.

Introduce the sched_smt_present_inc/dec() helpers so the SMT bookkeeping can
be called simply from both the normal and the error path. No functional change.

Cc: stable@kernel.org
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240703031610.587047-2-yangyingliang@huaweicloud.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
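As context for why pairing the inc/dec in helpers matters (this is editorial illustration, not part of the commit), here is a minimal userspace C sketch of the same refactoring pattern: the conditional bookkeeping lives in one pair of helpers, so a teardown error path can undo the decrement by calling the matching helper instead of re-open-coding the check. All names here (smt_cores_present, thread_siblings, cpu_activate, cpu_deactivate) are hypothetical stand-ins for the kernel's sched_smt_present static key, cpu_smt_mask() and the CPU hotplug callbacks.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the sched_smt_present static key: number of cores that
 * currently have more than one SMT sibling online. */
static int smt_cores_present;

/* Hypothetical model: how many siblings of this CPU are online. */
static int thread_siblings(int cpu)
{
	return cpu % 2 ? 2 : 1;		/* pretend odd CPUs share a core */
}

/* Paired helpers: the sibling check lives in exactly one place, so the
 * normal path and any error/rollback path stay symmetric. */
static void smt_present_inc(int cpu)
{
	if (thread_siblings(cpu) == 2)
		smt_cores_present++;
}

static void smt_present_dec(int cpu)
{
	if (thread_siblings(cpu) == 2)
		smt_cores_present--;
}

static int cpu_activate(int cpu)
{
	smt_present_inc(cpu);
	return 0;
}

static int cpu_deactivate(int cpu, bool fail)
{
	smt_present_dec(cpu);

	if (fail) {
		/* Error path: undo the decrement with the same helper. */
		smt_present_inc(cpu);
		return -1;
	}
	return 0;
}

int main(void)
{
	cpu_activate(1);
	cpu_deactivate(1, true);	/* fails, rolled back */
	printf("smt_cores_present = %d\n", smt_cores_present);	/* 1 */
	cpu_deactivate(1, false);
	printf("smt_cores_present = %d\n", smt_cores_present);	/* 0 */
	return 0;
}
```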
kernel/sched/core.c

index ebf21373f6634ff2c9f88649f760636a3ee8a19f..66916750346a1b4d0273ccf7515a832e7d7d902e 100644 (file)
@@ -9654,6 +9654,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
        return 0;
 }
 
+static inline void sched_smt_present_inc(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+               static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+}
+
+static inline void sched_smt_present_dec(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+               static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+}
+
 int sched_cpu_activate(unsigned int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -9665,13 +9681,10 @@ int sched_cpu_activate(unsigned int cpu)
         */
        balance_push_set(cpu, false);
 
-#ifdef CONFIG_SCHED_SMT
        /*
         * When going up, increment the number of cores with SMT present.
         */
-       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-               static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
+       sched_smt_present_inc(cpu);
        set_cpu_active(cpu, true);
 
        if (sched_smp_initialized) {
@@ -9740,13 +9753,12 @@ int sched_cpu_deactivate(unsigned int cpu)
        }
        rq_unlock_irqrestore(rq, &rf);
 
-#ifdef CONFIG_SCHED_SMT
        /*
         * When going down, decrement the number of cores with SMT present.
         */
-       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-               static_branch_dec_cpuslocked(&sched_smt_present);
+       sched_smt_present_dec(cpu);
 
+#ifdef CONFIG_SCHED_SMT
        sched_core_cpu_deactivate(cpu);
 #endif