From 0212696a844631a923aa6cedd74ebbb3cf434e51 Mon Sep 17 00:00:00 2001
From: Xuewen Yan
Date: Thu, 17 Apr 2025 12:34:56 +0800
Subject: [PATCH] sched/util_est: Simplify condition for util_est_{en,de}queue()

To prevent double enqueue/dequeue of the util-est for sched_delayed
tasks, commit 729288bc6856 ("kernel/sched: Fix util_est accounting for
DELAY_DEQUEUE") added the corresponding check. This check excludes
double en/dequeue during task migration and priority changes. In fact,
these conditions can be simplified.

For util_est_dequeue, we know that the sched_delayed flag is set in
dequeue_entity(). When the task goes to sleep, we need to call
util_est_dequeue() to subtract the util-est from the cfs_rq; at this
point, sched_delayed has not yet been set. If we find that
sched_delayed is already set, it indicates that dequeue_task_fair()
has already been called once for this task, so there is no need to
call util_est_dequeue() again. Therefore, simply checking the
sched_delayed flag is sufficient to prevent unnecessary util_est
updates during dequeue.

For util_est_enqueue, our goal is to add the util_est to the cfs_rq
when the task is enqueued. We do not want to add the util_est of a
sched_delayed task to the cfs_rq, however, because the task is still
sleeping, so we can skip util_est_enqueue() for sched_delayed tasks by
checking the sched_delayed flag. But when a delayed task is woken up,
the sched_delayed flag is cleared only after util_est_enqueue();
checking only the sched_delayed flag would therefore miss this
util_est_enqueue(). Since waking up a sched_delayed task calls
enqueue_task() with the ENQUEUE_DELAYED flag, we can determine whether
to call util_est_enqueue() by checking whether the enqueue flags
contain ENQUEUE_DELAYED.

Signed-off-by: Xuewen Yan
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Vincent Guittot
Reviewed-by: Dietmar Eggemann
Tested-by: Dietmar Eggemann
Link: https://lore.kernel.org/r/20250417043457.10632-2-xuewen.yan@unisoc.com
---
 kernel/sched/fair.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b00f16700f9c5..a028c294aef31 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6936,7 +6936,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	 * Let's add the task's estimated utilization to the cfs_rq's
 	 * estimated utilization, before we update schedutil.
 	 */
-	if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & ENQUEUE_RESTORE))))
+	if (!p->se.sched_delayed || (flags & ENQUEUE_DELAYED))
 		util_est_enqueue(&rq->cfs, p);
 
 	if (flags & ENQUEUE_DELAYED) {
@@ -7178,7 +7178,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
  */
 static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
-	if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE))))
+	if (!p->se.sched_delayed)
 		util_est_dequeue(&rq->cfs, p);
 
 	util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP);
-- 
2.47.2
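
As a side note (not part of the patch itself), the simplified gating can be
illustrated with a minimal standalone userspace C sketch. The struct, the
ENQUEUE_DELAYED value and the helper names should_util_est_enqueue() /
should_util_est_dequeue() are simplified stand-ins invented for illustration;
only the two conditions mirror the ones introduced by the patch.

/*
 * Minimal standalone sketch (not kernel code) of the simplified
 * util_est gating. Flag value, struct and helpers are illustrative
 * stand-ins; only the two if-conditions mirror the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define ENQUEUE_DELAYED  0x01	/* illustrative value, not the kernel's */

struct task {
	bool sched_delayed;	/* task is in the delayed-dequeue state */
};

/*
 * Enqueue path: skip util_est for delayed tasks, unless this enqueue
 * is the one waking a delayed task (ENQUEUE_DELAYED set).
 */
static bool should_util_est_enqueue(const struct task *p, int flags)
{
	return !p->sched_delayed || (flags & ENQUEUE_DELAYED);
}

/*
 * Dequeue path: a set sched_delayed flag means dequeue_task_fair()
 * already ran once for this task, so util_est was already removed.
 */
static bool should_util_est_dequeue(const struct task *p)
{
	return !p->sched_delayed;
}

int main(void)
{
	struct task normal  = { .sched_delayed = false };
	struct task delayed = { .sched_delayed = true };

	printf("normal enqueue:         %d\n", should_util_est_enqueue(&normal, 0));
	printf("delayed re-enqueue:     %d\n", should_util_est_enqueue(&delayed, 0));
	printf("delayed wakeup:         %d\n", should_util_est_enqueue(&delayed, ENQUEUE_DELAYED));
	printf("normal dequeue:         %d\n", should_util_est_dequeue(&normal));
	printf("second (delayed) deq.:  %d\n", should_util_est_dequeue(&delayed));
	return 0;
}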