From: Vincent Guittot
Date: Tue, 8 Jul 2025 16:56:26 +0000 (+0200)
Subject: sched/fair: Fix NO_RUN_TO_PARITY case
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=74eec63661d46a7153d04c2e0249eeb76cc76d44;p=thirdparty%2Flinux.git

sched/fair: Fix NO_RUN_TO_PARITY case

EEVDF expects the scheduler to allocate a time quantum to the selected
entity and then pick a new entity for the next quantum. Although this
notion of time quantum is not strictly doable in our case, we can ensure
a minimum runtime for each task most of the time and pick a new entity
after a minimum time has elapsed. Reuse the slice protection of run to
parity to ensure such a runtime quantum.

Signed-off-by: Vincent Guittot
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20250708165630.1948751-3-vincent.guittot@linaro.org
---

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4802fcf738cde..55921385927d8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -583,7 +583,15 @@ struct sched_entity {
 	u64				sum_exec_runtime;
 	u64				prev_sum_exec_runtime;
 	u64				vruntime;
-	s64				vlag;
+	union {
+		/*
+		 * When !@on_rq this field is vlag.
+		 * When cfs_rq->curr == se (which implies @on_rq)
+		 * this field is vprot. See protect_slice().
+		 */
+		s64			vlag;
+		u64			vprot;
+	};
 	u64				slice;
 
 	u64				nr_migrations;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 43fe5c831dd59..8d288df79a6bd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -882,23 +882,35 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
 }
 
 /*
- * HACK, stash a copy of deadline at the point of pick in vlag,
- * which isn't used until dequeue.
+ * Set the vruntime up to which an entity can run before looking
+ * for another entity to pick.
+ * In case of run to parity, we protect the entity up to its deadline.
+ * When run to parity is disabled, we give a minimum quantum to the running
+ * entity to ensure progress.
  */
 static inline void set_protect_slice(struct sched_entity *se)
 {
-	se->vlag = se->deadline;
+	u64 slice = se->slice;
+	u64 vprot = se->deadline;
+
+	if (!sched_feat(RUN_TO_PARITY))
+		slice = min(slice, normalized_sysctl_sched_base_slice);
+
+	if (slice != se->slice)
+		vprot = min_vruntime(vprot, se->vruntime + calc_delta_fair(slice, se));
+
+	se->vprot = vprot;
 }
 
 static inline bool protect_slice(struct sched_entity *se)
 {
-	return se->vlag == se->deadline;
+	return ((s64)(se->vprot - se->vruntime) > 0);
 }
 
 static inline void cancel_protect_slice(struct sched_entity *se)
 {
 	if (protect_slice(se))
-		se->vlag = se->deadline + 1;
+		se->vprot = se->vruntime;
 }
 
 /*
@@ -937,7 +949,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
 	if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
 		curr = NULL;
 
-	if (sched_feat(RUN_TO_PARITY) && curr && protect_slice(curr))
+	if (curr && protect_slice(curr))
 		return curr;
 
 	/* Pick the leftmost entity if it's eligible */
@@ -1156,11 +1168,8 @@ static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
 		cgroup_account_cputime(p, delta_exec);
 }
 
-static inline bool did_preempt_short(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+static inline bool resched_next_slice(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-	if (!sched_feat(PREEMPT_SHORT))
-		return false;
-
 	if (protect_slice(curr))
 		return false;
 
@@ -1248,7 +1257,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	if (cfs_rq->nr_queued == 1)
 		return;
 
-	if (resched || did_preempt_short(cfs_rq, curr)) {
+	if (resched || resched_next_slice(cfs_rq, curr)) {
 		resched_curr_lazy(rq);
 		clear_buddies(cfs_rq, curr);
 	}
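
For readers who want to see the new protection window in isolation, below is a
minimal standalone sketch (not part of the patch) of the logic that
set_protect_slice()/protect_slice() implement after this change. It is an
illustration under simplifying assumptions: struct entity, delta_fair(), the
run_to_parity/base_slice parameters and the 1024 weight are stand-ins for the
kernel's sched_entity, calc_delta_fair(), sched_feat(RUN_TO_PARITY),
normalized_sysctl_sched_base_slice and NICE_0_LOAD.

/*
 * Standalone sketch of the vprot protection window, not kernel code.
 * A weight of NICE_0_LOAD (1024) makes vruntime advance 1:1 with wall time.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NICE_0_LOAD	1024ULL

struct entity {
	uint64_t vruntime;	/* virtual runtime */
	uint64_t deadline;	/* virtual deadline */
	uint64_t slice;		/* requested slice, in ns */
	uint64_t weight;	/* load weight */
	uint64_t vprot;		/* vruntime up to which the pick is protected */
};

/* Rough stand-in for calc_delta_fair(): wall-clock delta scaled by weight. */
static uint64_t delta_fair(uint64_t delta, const struct entity *se)
{
	return delta * NICE_0_LOAD / se->weight;
}

/* Mirrors the new set_protect_slice(): !run_to_parity caps the window. */
static void set_protect_slice(struct entity *se, bool run_to_parity,
			      uint64_t base_slice)
{
	uint64_t slice = se->slice;
	uint64_t vprot = se->deadline;

	if (!run_to_parity && base_slice < slice)
		slice = base_slice;

	if (slice != se->slice) {
		uint64_t vquantum = se->vruntime + delta_fair(slice, se);

		/* min_vruntime()-style comparison, robust against wrap. */
		if ((int64_t)(vquantum - vprot) < 0)
			vprot = vquantum;
	}

	se->vprot = vprot;
}

/* Mirrors the new protect_slice(): protection holds while vruntime < vprot. */
static bool protect_slice(const struct entity *se)
{
	return (int64_t)(se->vprot - se->vruntime) > 0;
}

int main(void)
{
	/* Nice-0 task that asked for a 10ms slice; base quantum is 3ms. */
	struct entity se = {
		.vruntime = 0, .deadline = 10000000,
		.slice = 10000000, .weight = NICE_0_LOAD,
	};

	set_protect_slice(&se, false, 3000000);
	printf("vprot = %llu\n", (unsigned long long)se.vprot);	/* 3000000 */

	se.vruntime = 2000000;
	printf("protected: %d\n", protect_slice(&se));		/* 1 */
	se.vruntime = 3000000;
	printf("protected: %d\n", protect_slice(&se));		/* 0 */
	return 0;
}

With RUN_TO_PARITY the window still extends to the virtual deadline; with
NO_RUN_TO_PARITY it is capped at one base quantum, which is why pick_eevdf()
can now call protect_slice() unconditionally. Storing the boundary in vprot
reuses the vlag storage through the new union, which works because, per the
comment added in sched.h, vlag is only meaningful while the entity is not
current.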