sched: Split out __schedule() deactivate task logic into a helper
author John Stultz <jstultz@google.com>
Wed, 9 Oct 2024 23:53:39 +0000 (16:53 -0700)
committer Peter Zijlstra <peterz@infradead.org>
Mon, 14 Oct 2024 10:52:41 +0000 (12:52 +0200)
As we're going to re-use the deactivation logic, split it out of
__schedule() into a helper; a toy sketch of the resulting shape
follows the tags below.

Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Metin Kaya <metin.kaya@arm.com>
Reviewed-by: Qais Yousef <qyousef@layalina.io>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Tested-by: Metin Kaya <metin.kaya@arm.com>
Link: https://lore.kernel.org/r/20241009235352.1614323-7-jstultz@google.com
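
For orientation, here is a minimal userspace sketch of the refactor's shape. The stub types and names (struct toy_rq, struct toy_task, try_to_block_task_toy) are illustrative assumptions, not kernel code; they only mirror the bool-returning helper that the diff below introduces.

/* Toy userspace analogue, not kernel code: struct toy_rq, struct toy_task
 * and try_to_block_task_toy() are illustrative stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define TASK_RUNNING    0

struct toy_task {
        unsigned long state;            /* stand-in for p->__state */
        bool signal_pending;            /* stand-in for signal_pending_state() */
};

struct toy_rq {
        int nr_running;
};

/* Returns true iff the task was dequeued (blocked); false when a pending
 * signal forces it back to TASK_RUNNING -- the same contract as the
 * try_to_block_task() helper in the diff. */
static bool try_to_block_task_toy(struct toy_rq *rq, struct toy_task *p,
                                  unsigned long task_state)
{
        if (task_state && p->signal_pending) {
                p->state = TASK_RUNNING;
                return false;
        }
        rq->nr_running--;               /* block_task() stand-in */
        return true;
}

int main(void)
{
        struct toy_rq rq = { .nr_running = 2 };
        struct toy_task prev = { .state = 2, .signal_pending = false };

        bool block = try_to_block_task_toy(&rq, &prev, prev.state);
        printf("block=%d nr_running=%d\n", block, rq.nr_running);
        return 0;
}

With the deactivation logic in a helper like this, the call site in __schedule() collapses to a single line, exactly as the second hunk shows: block = try_to_block_task(rq, prev, prev_state);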
kernel/sched/core.c

index ab0b775a292dac841daeddcd565737a3c5e60802..b534de6e543a41e8f6ca6df258b140686796944a 100644
@@ -6490,6 +6490,45 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 #define SM_PREEMPT             1
 #define SM_RTLOCK_WAIT         2
 
+/*
+ * Helper function for __schedule().
+ *
+ * If the task does not have signals pending, deactivate it.
+ * Otherwise, mark the task's __state as TASK_RUNNING.
+ */
+static bool try_to_block_task(struct rq *rq, struct task_struct *p,
+                             unsigned long task_state)
+{
+       int flags = DEQUEUE_NOCLOCK;
+
+       if (signal_pending_state(task_state, p)) {
+               WRITE_ONCE(p->__state, TASK_RUNNING);
+               return false;
+       }
+
+       p->sched_contributes_to_load =
+               (task_state & TASK_UNINTERRUPTIBLE) &&
+               !(task_state & TASK_NOLOAD) &&
+               !(task_state & TASK_FROZEN);
+
+       if (unlikely(is_special_task_state(task_state)))
+               flags |= DEQUEUE_SPECIAL;
+
+       /*
+        * __schedule()                 ttwu()
+        *   prev_state = prev->state;    if (p->on_rq && ...)
+        *   if (prev_state)                goto out;
+        *     p->on_rq = 0;              smp_acquire__after_ctrl_dep();
+        *                                p->state = TASK_WAKING
+        *
+        * Where __schedule() and ttwu() have matching control dependencies.
+        *
+        * After this, schedule() must not care about p->state any more.
+        */
+       block_task(rq, p, flags);
+       return true;
+}
+
 /*
  * __schedule() is the main scheduler function.
  *
@@ -6598,33 +6637,7 @@ static void __sched notrace __schedule(int sched_mode)
                        goto picked;
                }
        } else if (!preempt && prev_state) {
-               if (signal_pending_state(prev_state, prev)) {
-                       WRITE_ONCE(prev->__state, TASK_RUNNING);
-               } else {
-                       int flags = DEQUEUE_NOCLOCK;
-
-                       prev->sched_contributes_to_load =
-                               (prev_state & TASK_UNINTERRUPTIBLE) &&
-                               !(prev_state & TASK_NOLOAD) &&
-                               !(prev_state & TASK_FROZEN);
-
-                       if (unlikely(is_special_task_state(prev_state)))
-                               flags |= DEQUEUE_SPECIAL;
-
-                       /*
-                        * __schedule()                 ttwu()
-                        *   prev_state = prev->state;    if (p->on_rq && ...)
-                        *   if (prev_state)                goto out;
-                        *     p->on_rq = 0;              smp_acquire__after_ctrl_dep();
-                        *                                p->state = TASK_WAKING
-                        *
-                        * Where __schedule() and ttwu() have matching control dependencies.
-                        *
-                        * After this, schedule() must not care about p->state any more.
-                        */
-                       block_task(rq, prev, flags);
-                       block = true;
-               }
+               block = try_to_block_task(rq, prev, prev_state);
                switch_count = &prev->nvcsw;
        }
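
The memory-ordering comment carried over into the helper pairs __schedule()'s clearing of p->on_rq with ttwu()'s control dependency plus smp_acquire__after_ctrl_dep(). A rough userspace analogue of that pattern using C11 atomics follows; the fence placement, the release store, and the schedule_side()/ttwu_side() names are assumptions of this sketch, not the kernel's actual primitives.

/* Userspace analogue, not kernel code: C11 atomics stand in for the
 * kernel's barriers and control dependency. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define TASK_WAKING     0x0200

static atomic_ulong p_state = 2;        /* prev->__state, nonzero = blocked */
static atomic_int   p_on_rq = 1;        /* prev->on_rq */

/* __schedule() side: read prev_state, then clear on_rq. */
static void *schedule_side(void *arg)
{
        unsigned long prev_state = atomic_load(&p_state);

        (void)arg;
        if (prev_state)
                atomic_store_explicit(&p_on_rq, 0, memory_order_release);
        return NULL;
}

/* ttwu() side: the on_rq test plus the acquire fence plays the role of
 * smp_acquire__after_ctrl_dep() after the control dependency. */
static void *ttwu_side(void *arg)
{
        (void)arg;
        if (atomic_load_explicit(&p_on_rq, memory_order_relaxed))
                return NULL;                    /* "goto out" */

        atomic_thread_fence(memory_order_acquire);
        atomic_store(&p_state, TASK_WAKING);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, schedule_side, NULL);
        pthread_create(&b, NULL, ttwu_side, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("state=%#lx on_rq=%d\n",
               atomic_load(&p_state), atomic_load(&p_on_rq));
        return 0;
}

Build with e.g. cc -std=c11 -pthread. Whichever interleaving occurs, ttwu_side() either sees the task still on the runqueue and bails out (the "goto out" path), or observes on_rq == 0 and only then marks the task TASK_WAKING, matching the pairing the kernel comment describes.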