]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched/proxy: Yield the donor task
authorFernand Sieber <sieberf@amazon.com>
Thu, 6 Nov 2025 10:40:10 +0000 (12:40 +0200)
committerPeter Zijlstra <peterz@infradead.org>
Tue, 11 Nov 2025 11:33:36 +0000 (12:33 +0100)
When executing a task in proxy context, handle yields as if they were
requested by the donor task. This matches the traditional PI semantics
of yield() as well.

This avoids scenarios like the proxy task yielding, pick_next_task()
selecting the same previously blocked donor, running the proxy task again, etc.

Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202510211205.1e0f5223-lkp@intel.com
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fernand Sieber <sieberf@amazon.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251106104022.195157-1-sieberf@amazon.com
kernel/sched/deadline.c
kernel/sched/ext.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/syscalls.c

index 6b8a9286e2fca277883763da48b1dab073805b5f..13112c680f922886e1caaed807b471cec766a028 100644 (file)
@@ -2143,7 +2143,7 @@ static void yield_task_dl(struct rq *rq)
         * it and the bandwidth timer will wake it up and will give it
         * new scheduling parameters (thanks to dl_yielded=1).
         */
-       rq->curr->dl.dl_yielded = 1;
+       rq->donor->dl.dl_yielded = 1;
 
        update_rq_clock(rq);
        update_curr_dl(rq);
index b063444dc003f93bfd14fad4020e53fd6800e0ce..224b72c8e8da9478c393e8d1d56e48cf280dbe8f 100644 (file)
@@ -1474,7 +1474,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
 static void yield_task_scx(struct rq *rq)
 {
        struct scx_sched *sch = scx_root;
-       struct task_struct *p = rq->curr;
+       struct task_struct *p = rq->donor;
 
        if (SCX_HAS_OP(sch, yield))
                SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
@@ -1485,7 +1485,7 @@ static void yield_task_scx(struct rq *rq)
 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
 {
        struct scx_sched *sch = scx_root;
-       struct task_struct *from = rq->curr;
+       struct task_struct *from = rq->donor;
 
        if (SCX_HAS_OP(sch, yield))
                return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq,
index 273e2871b59e70dbe36251645a2bc9587b7dc314..f1d8eb350f595fa1f764963d5bfe77284d7b3846 100644 (file)
@@ -8980,7 +8980,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct t
  */
 static void yield_task_fair(struct rq *rq)
 {
-       struct task_struct *curr = rq->curr;
+       struct task_struct *curr = rq->donor;
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        struct sched_entity *se = &curr->se;
 
index 1fd97f2d7ec628d78dd82989176117d92c4c8490..f1867fe8e5c5353167b8cfa29cf0650956fd8a75 100644 (file)
@@ -1490,7 +1490,7 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 
 static void yield_task_rt(struct rq *rq)
 {
-       requeue_task_rt(rq, rq->curr, 0);
+       requeue_task_rt(rq, rq->donor, 0);
 }
 
 static int find_lowest_rq(struct task_struct *task);
index 8f0f603b530bef3cba3ba54037ad6716f3f50eb1..807879131add8590ef76aaa5d6254c574186b590 100644 (file)
@@ -1319,7 +1319,7 @@ static void do_sched_yield(void)
        rq = this_rq_lock_irq(&rf);
 
        schedstat_inc(rq->yld_count);
-       current->sched_class->yield_task(rq);
+       rq->donor->sched_class->yield_task(rq);
 
        preempt_disable();
        rq_unlock_irq(rq, &rf);
@@ -1388,12 +1388,13 @@ EXPORT_SYMBOL(yield);
  */
 int __sched yield_to(struct task_struct *p, bool preempt)
 {
-       struct task_struct *curr = current;
+       struct task_struct *curr;
        struct rq *rq, *p_rq;
        int yielded = 0;
 
        scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
                rq = this_rq();
+               curr = rq->donor;
 
 again:
                p_rq = task_rq(p);