sched: Fix proxy/current (push,pull)ability
Author:     Valentin Schneider <valentin.schneider@arm.com>
AuthorDate: Sat, 12 Jul 2025 03:33:48 +0000 (03:33 +0000)
Commit:     Peter Zijlstra <peterz@infradead.org>
CommitDate: Mon, 14 Jul 2025 15:16:33 +0000 (17:16 +0200)
Proxy execution forms atomic pairs of tasks: The waiting donor
task (scheduling context) and a proxy (execution context). The
donor task, along with the rest of the blocked chain, follows
the proxy wrt CPU placement.
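
As a rough mental model, the pair looks like this (a sketch assuming the
rq->donor/rq->curr split used elsewhere in this series, not something
introduced by this patch):

	struct rq {
		/* ... */
		struct task_struct __rcu *donor; /* scheduling context: the task that was picked */
		struct task_struct __rcu *curr;  /* execution context: the mutex owner that runs */
		/* ... */
	};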

They can be the same task, in which case push/pull doesn't need any
modification. When they are different, however, consider two SCHED_FIFO
tasks, FIFO1 & FIFO42:

      ,->  RT42
      |     | blocked-on
      |     v
blocked_donor |   mutex
      |     | owner
      |     v
      `--  RT1

   RT1
   RT42

  CPU0            CPU1
   ^                ^
   |                |
  overloaded    !overloaded
  rq prio = 42  rq prio = 0

RT1 is eligible to be pushed to CPU1, but should that happen it will
"carry" RT42 along. Clearly, neither RT1 nor RT42 should be considered
push/pullable here.

Unfortunately, only the donor task is usually dequeued from the rq,
while the proxied execution context (rq->curr) remains on the rq.
This can cause RT1 to be selected for migration by logic like the
rt pushable_list.
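
For reference, the push path walks that list and migrates whatever it
finds; a simplified sketch of the selection (modelled loosely on
pick_highest_pushable_task(), with most checks elided):

	static struct task_struct *pick_pushable(struct rq *rq, int cpu)
	{
		struct task_struct *p;

		/* Walk in priority order; RT1 would be found and pushed here. */
		plist_for_each_entry(p, &rq->rt.pushable_tasks, pushable_tasks) {
			if (cpumask_test_cpu(cpu, &p->cpus_mask))
				return p;
		}
		return NULL;
	}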

Thus, add a dequeue/enqueue cycle on the proxy task before __schedule
returns, which allows the sched class logic to avoid adding the now
current task to the pushable_list.

Furthermore, tasks becoming blocked on a mutex don't need an explicit
dequeue/enqueue cycle to be made (push/pull)able: they have to be running
to block on a mutex, thus they will eventually hit put_prev_task().
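
The hunks below lean on two small helpers introduced earlier in the
series; roughly (sketches of the kernel/sched/sched.h definitions,
details may differ):

	/* Sketch: is @p part of a blocked chain behind a mutex? */
	static inline bool task_is_blocked(struct task_struct *p)
	{
		if (!sched_proxy_exec())
			return false;
		return !!p->blocked_on;
	}

	/* Sketch: is @p the scheduling-context (donor) task on @rq? */
	static inline bool task_current_donor(struct rq *rq, struct task_struct *p)
	{
		return rq->donor == p;
	}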

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Connor O'Brien <connoro@google.com>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lkml.kernel.org/r/20250712033407.2383110-8-jstultz@google.com
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/rt.c

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cb55d4247e65efd9e706edfb5a16640ba1ad2f41..a0b11201a7b4e903362011e475933c969b46a235 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6654,6 +6654,23 @@ find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
 }
 #endif /* SCHED_PROXY_EXEC */
 
+static inline void proxy_tag_curr(struct rq *rq, struct task_struct *owner)
+{
+       if (!sched_proxy_exec())
+               return;
+       /*
+        * pick_next_task() calls set_next_task() on the chosen task
+        * at some point, which ensures it is not push/pullable.
+        * However, the chosen/donor task *and* the mutex owner form an
+        * atomic pair wrt push/pull.
+        *
+        * Make sure owner we run is not pushable. Unfortunately we can
+        * only deal with that by means of a dequeue/enqueue cycle. :-/
+        */
+       dequeue_task(rq, owner, DEQUEUE_NOCLOCK | DEQUEUE_SAVE);
+       enqueue_task(rq, owner, ENQUEUE_NOCLOCK | ENQUEUE_RESTORE);
+}
+
 /*
  * __schedule() is the main scheduler function.
  *
@@ -6798,6 +6815,10 @@ picked:
                 * changes to task_struct made by pick_next_task().
                 */
                RCU_INIT_POINTER(rq->curr, next);
+
+               if (!task_current_donor(rq, next))
+                       proxy_tag_curr(rq, next);
+
                /*
                 * The membarrier system call requires each architecture
                 * to have a full memory barrier after updating
@@ -6832,6 +6853,10 @@ picked:
                /* Also unlocks the rq: */
                rq = context_switch(rq, prev, next, &rf);
        } else {
+               /* In case next was already curr but just got blocked_donor */
+               if (!task_current_donor(rq, next))
+                       proxy_tag_curr(rq, next);
+
                rq_unpin_lock(rq, &rf);
                __balance_callbacks(rq);
                raw_spin_rq_unlock_irq(rq);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 1af06e48227ddf960bb66d24763015dca35c505e..e2d51f4306b31fbf895d355c941426dd0d078f27 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2121,6 +2121,9 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
        if (dl_server(&p->dl))
                return;
 
+       if (task_is_blocked(p))
+               return;
+
        if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
 }
@@ -2415,6 +2418,10 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_s
        update_curr_dl(rq);
 
        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
+
+       if (task_is_blocked(p))
+               return;
+
        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
 }
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index be6e9bcbe82b6f03a1da0fe4bdcfe2c2598b190d..7936d43337313cf25c972cd61af4920edc733b2e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1440,6 +1440,9 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        enqueue_rt_entity(rt_se, flags);
 
+       if (task_is_blocked(p))
+               return;
+
        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
 }
@@ -1716,6 +1719,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_s
 
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 
+       if (task_is_blocked(p))
+               return;
        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active