From: Greg Kroah-Hartman
Date: Fri, 17 Oct 2025 13:26:45 +0000 (+0200)
Subject: 6.12-stable patches
X-Git-Tag: v5.15.195~23
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=2f5791601c3c7406b0e7f2f31e863bdc976508e4;p=thirdparty%2Fkernel%2Fstable-queue.git

6.12-stable patches

added patches:
      sched-fair-block-delayed-tasks-on-throttled-hierarchy-during-dequeue.patch
---

diff --git a/queue-6.12/sched-fair-block-delayed-tasks-on-throttled-hierarchy-during-dequeue.patch b/queue-6.12/sched-fair-block-delayed-tasks-on-throttled-hierarchy-during-dequeue.patch
new file mode 100644
index 0000000000..d5308864fa
--- /dev/null
+++ b/queue-6.12/sched-fair-block-delayed-tasks-on-throttled-hierarchy-during-dequeue.patch
@@ -0,0 +1,96 @@
+From kprateek.nayak@amd.com Fri Oct 17 15:25:45 2025
+From: K Prateek Nayak
+Date: Wed, 15 Oct 2025 06:03:59 +0000
+Subject: sched/fair: Block delayed tasks on throttled hierarchy during dequeue
+To: Greg Kroah-Hartman , Sasha Levin , , Matt Fleming , Ingo Molnar , Peter Zijlstra , Juri Lelli , Vincent Guittot ,
+Cc: Dietmar Eggemann , Steven Rostedt , Ben Segall , Mel Gorman , Valentin Schneider , , Matt Fleming , "Oleg Nesterov" , John Stultz , Chris Arges , K Prateek Nayak
+Message-ID: <20251015060359.34722-1-kprateek.nayak@amd.com>
+
+From: K Prateek Nayak
+
+Dequeuing a fair task on a throttled hierarchy returns early on
+encountering a throttled cfs_rq since the throttle path has already
+dequeued the hierarchy above and has adjusted the h_nr_* accounting till
+the root cfs_rq.
+
+dequeue_entities() crucially misses calling __block_task() for delayed
+tasks being dequeued on the throttled hierarchies, but this was mostly
+harmless until commit b7ca5743a260 ("sched/core: Tweak
+wait_task_inactive() to force dequeue sched_delayed tasks") since all
+existing cases would re-enqueue the task if task_on_rq_queued() returned
+true and the task would eventually be blocked at pick after the
+hierarchy was unthrottled.
+
+wait_task_inactive() is special as it expects the delayed task on a
+throttled hierarchy to reach the blocked state on dequeue but since
+__block_task() is never called, task_on_rq_queued() continues to return
+true. Furthermore, since the task is now off the hierarchy, the pick
+never reaches it to fully block the task even after unthrottle, leading
+to wait_task_inactive() looping endlessly.
+
+Remedy this by calling __block_task() if a delayed task is being
+dequeued on a throttled hierarchy.
+
+This fix is only required for stable kernels implementing delay dequeue
+(>= v6.12) before v6.18 since upstream commit e1fad12dcb66 ("sched/fair:
+Switch to task based throttle model") indirectly fixes this by removing
+the early return conditions in dequeue_entities() as part of the per-task
+throttle feature.
+
+Cc: stable@vger.kernel.org
+Reported-by: Matt Fleming
+Closes: https://lore.kernel.org/all/20250925133310.1843863-1-matt@readmodwrite.com/
+Fixes: b7ca5743a260 ("sched/core: Tweak wait_task_inactive() to force dequeue sched_delayed tasks")
+Tested-by: Matt Fleming
+Signed-off-by: K Prateek Nayak
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/sched/fair.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7187,6 +7187,7 @@ static int dequeue_entities(struct rq *r
+ 	int h_nr_delayed = 0;
+ 	struct cfs_rq *cfs_rq;
+ 	u64 slice = 0;
++	int ret = 0;
+
+ 	if (entity_is_task(se)) {
+ 		p = task_of(se);
+@@ -7218,7 +7219,7 @@ static int dequeue_entities(struct rq *r
+
+ 		/* end evaluation on encountering a throttled cfs_rq */
+ 		if (cfs_rq_throttled(cfs_rq))
+-			return 0;
++			goto out;
+
+ 		/* Don't dequeue parent if it has other entities besides us */
+ 		if (cfs_rq->load.weight) {
+@@ -7261,7 +7262,7 @@ static int dequeue_entities(struct rq *r
+
+ 		/* end evaluation on encountering a throttled cfs_rq */
+ 		if (cfs_rq_throttled(cfs_rq))
+-			return 0;
++			goto out;
+ 	}
+
+ 	sub_nr_running(rq, h_nr_queued);
+@@ -7273,6 +7274,8 @@ static int dequeue_entities(struct rq *r
+ 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
+ 		rq->next_balance = jiffies;
+
++	ret = 1;
++out:
+ 	if (p && task_delayed) {
+ 		SCHED_WARN_ON(!task_sleep);
+ 		SCHED_WARN_ON(p->on_rq != 1);
+@@ -7288,7 +7291,7 @@ static int dequeue_entities(struct rq *r
+ 		__block_task(rq, p);
+ 	}
+
+-	return 1;
++	return ret;
+ }
+
+ /*
diff --git a/queue-6.12/series b/queue-6.12/series
index 951af66de6..5bc91f66be 100644
--- a/queue-6.12/series
+++ b/queue-6.12/series
@@ -270,3 +270,4 @@ fs-add-initramfs_options-to-set-initramfs-mount-opti.patch
 cramfs-verify-inode-mode-when-loading-from-disk.patch
 writeback-avoid-softlockup-when-switching-many-inode.patch
 writeback-avoid-excessively-long-inode-switching-tim.patch
+sched-fair-block-delayed-tasks-on-throttled-hierarchy-during-dequeue.patch
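
For readers following the commit message without the kernel sources at hand, below is a minimal, self-contained C sketch of the failure mode it describes. Everything in it is a hypothetical stand-in rather than kernel API: "queued" plays the role of task_on_rq_queued(), mark_blocked() the role of __block_task(), dequeue_on_throttled() the role of the early-return path in dequeue_entities(), and the capped polling loop the role of wait_task_inactive().

/*
 * Illustrative sketch only -- NOT kernel code.  Models the state machine
 * from the commit message with simplified, made-up stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	bool sched_delayed;	/* task is in the "delayed dequeue" state */
	bool queued;		/* stand-in for task_on_rq_queued()       */
	bool on_hierarchy;	/* still reachable by the pick path?      */
};

/* Analogue of __block_task(): the task finally counts as fully blocked. */
static void mark_blocked(struct toy_task *p)
{
	p->queued = false;
}

/*
 * Analogue of dequeue on a throttled hierarchy.  With fixed == false the
 * early bail-out skips the blocking step, mirroring the bug; with
 * fixed == true the delayed task is blocked before bailing out, which is
 * what the stable fix adds.
 */
static void dequeue_on_throttled(struct toy_task *p, bool fixed)
{
	p->on_hierarchy = false;	/* throttle already pulled it off */
	if (p->sched_delayed && fixed)
		mark_blocked(p);
}

/* Analogue of the wait_task_inactive() polling loop, capped for the demo. */
static bool wait_task_inactive_sketch(struct toy_task *p)
{
	for (int spin = 0; spin < 1000; spin++) {
		if (!p->queued)
			return true;	/* task reached the blocked state */
		/*
		 * In the buggy case nothing ever clears "queued": the task
		 * is off the hierarchy, so no later pick can block it.
		 */
	}
	return false;			/* would loop endlessly in reality */
}

int main(void)
{
	struct toy_task buggy = { .sched_delayed = true, .queued = true, .on_hierarchy = true };
	struct toy_task fixed = buggy;

	dequeue_on_throttled(&buggy, false);
	dequeue_on_throttled(&fixed, true);

	printf("without fix: waiter completes? %s\n",
	       wait_task_inactive_sketch(&buggy) ? "yes" : "no (stuck)");
	printf("with fix:    waiter completes? %s\n",
	       wait_task_inactive_sketch(&fixed) ? "yes" : "no (stuck)");
	return 0;
}

Built with any C compiler, the toy should report the unfixed task's waiter as stuck and the fixed one as completing, which is the endless-loop symptom the patch removes by blocking the delayed task before the throttled early return.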