sched/core: Introduce sched_set_rq_on/offline() helper
author Yang Yingliang <yangyingliang@huawei.com>
Wed, 3 Jul 2024 03:16:09 +0000 (11:16 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 14 Aug 2024 13:34:35 +0000 (15:34 +0200)
commit 2f027354122f58ee846468a6f6b48672fff92e9b upstream.

Introduce the sched_set_rq_online()/sched_set_rq_offline() helpers so the
locked online/offline transition can be invoked simply from both the normal
and error paths. No functional change.

Cc: stable@kernel.org
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240703031610.587047-4-yangyingliang@huaweicloud.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
kernel/sched/core.c

index fc978aac090d5b0883bd04798b954021dd656d8c..9dee52b4fc7ffd46652d4093f9f2a5c50f227981 100644 (file)
@@ -9604,6 +9604,30 @@ void set_rq_offline(struct rq *rq)
        }
 }
 
+static inline void sched_set_rq_online(struct rq *rq, int cpu)
+{
+       struct rq_flags rf;
+
+       rq_lock_irqsave(rq, &rf);
+       if (rq->rd) {
+               BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+               set_rq_online(rq);
+       }
+       rq_unlock_irqrestore(rq, &rf);
+}
+
+static inline void sched_set_rq_offline(struct rq *rq, int cpu)
+{
+       struct rq_flags rf;
+
+       rq_lock_irqsave(rq, &rf);
+       if (rq->rd) {
+               BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+               set_rq_offline(rq);
+       }
+       rq_unlock_irqrestore(rq, &rf);
+}
+
 /*
  * used to mark begin/end of suspend/resume:
  */
@@ -9673,7 +9697,6 @@ static inline void sched_smt_present_dec(int cpu)
 int sched_cpu_activate(unsigned int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       struct rq_flags rf;
 
        /*
         * Clear the balance_push callback and prepare to schedule
@@ -9702,12 +9725,7 @@ int sched_cpu_activate(unsigned int cpu)
         * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
         *    domains.
         */
-       rq_lock_irqsave(rq, &rf);
-       if (rq->rd) {
-               BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-               set_rq_online(rq);
-       }
-       rq_unlock_irqrestore(rq, &rf);
+       sched_set_rq_online(rq, cpu);
 
        return 0;
 }
@@ -9715,7 +9733,6 @@ int sched_cpu_activate(unsigned int cpu)
 int sched_cpu_deactivate(unsigned int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       struct rq_flags rf;
        int ret;
 
        /*
@@ -9746,12 +9763,7 @@ int sched_cpu_deactivate(unsigned int cpu)
         */
        synchronize_rcu();
 
-       rq_lock_irqsave(rq, &rf);
-       if (rq->rd) {
-               BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-               set_rq_offline(rq);
-       }
-       rq_unlock_irqrestore(rq, &rf);
+       sched_set_rq_offline(rq, cpu);
 
        /*
         * When going down, decrement the number of cores with SMT present.
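The motivation stated in the commit message is that the locked set_rq_online()/set_rq_offline() sequence can now also be driven from an error path, e.g. rolling an rq back online when a later step of CPU deactivation fails; that caller is not part of this diff. The standalone C sketch below only models that rollback pattern under stated assumptions: toy_rq, toy_set_rq_online()/toy_set_rq_offline() and toy_cpu_deactivate() are hypothetical names, and a pthread mutex stands in for rq_lock_irqsave()/rq_unlock_irqrestore().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct rq: a lock, an online flag, and a "root
 * domain" pointer (non-NULL once attached, like rq->rd). */
struct toy_rq {
	pthread_mutex_t lock;
	bool online;
	void *rd;
};

/* Analogue of sched_set_rq_online(): take the lock, check the root
 * domain, flip the state. The real helper additionally BUG_ON()s if
 * the cpu is not in rq->rd->span. */
static void toy_set_rq_online(struct toy_rq *rq)
{
	pthread_mutex_lock(&rq->lock);
	if (rq->rd)
		rq->online = true;
	pthread_mutex_unlock(&rq->lock);
}

static void toy_set_rq_offline(struct toy_rq *rq)
{
	pthread_mutex_lock(&rq->lock);
	if (rq->rd)
		rq->online = false;
	pthread_mutex_unlock(&rq->lock);
}

/* Deactivation with a failure after the rq went offline: because the
 * whole lock/check/set sequence lives in one helper, the error path
 * can simply re-run the online helper to undo the transition. */
static int toy_cpu_deactivate(struct toy_rq *rq, bool later_step_fails)
{
	toy_set_rq_offline(rq);
	if (later_step_fails) {
		toy_set_rq_online(rq);	/* rollback on error */
		return -1;
	}
	return 0;
}

int main(void)
{
	int dummy_rd;
	struct toy_rq rq = { PTHREAD_MUTEX_INITIALIZER, true, &dummy_rd };

	toy_cpu_deactivate(&rq, true);
	printf("after failed deactivate, online=%d\n", rq.online); /* prints 1 */
	return 0;
}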