sched/fair: Fix entity's lag with run to parity
author    Vincent Guittot <vincent.guittot@linaro.org>
          Tue, 8 Jul 2025 16:56:29 +0000 (18:56 +0200)
committer Peter Zijlstra <peterz@infradead.org>
          Wed, 9 Jul 2025 11:40:23 +0000 (13:40 +0200)
When an entity is enqueued without preempting current, we must ensure
that the slice protection is updated to take into account the slice
duration of the newly enqueued task so that its lag will not exceed
its slice (+ tick).

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250708165630.1948751-6-vincent.guittot@linaro.org
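
[ A minimal userspace sketch of the clamp added below (my illustration,
  not kernel code): cfs_rq_min_slice() is replaced by a plain min_slice
  parameter, calc_delta_fair() is reduced to an identity mapping (which
  only holds for a nice-0 weight), and the numbers are made up. ]

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

struct sched_entity {
        u64 vruntime;
        u64 vprot;
};

/* Stand-in for calc_delta_fair(): a nice-0 task maps wall time 1:1. */
static u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
        (void)se;
        return delta;
}

/* Wraparound-safe "smaller vruntime", mirroring the kernel helper. */
static u64 min_vruntime(u64 a, u64 b)
{
        return (s64)(a - b) < 0 ? a : b;
}

/* The fix: never let protection extend past one min_slice of lag. */
static void update_protect_slice(u64 min_slice, struct sched_entity *se)
{
        se->vprot = min_vruntime(se->vprot,
                                 se->vruntime + calc_delta_fair(min_slice, se));
}

int main(void)
{
        /* current's protection was set before a short-slice task woke up */
        struct sched_entity curr = { .vruntime = 100, .vprot = 100 + 3000 };
        u64 min_slice = 700;    /* waking task shortened cfs_rq's min slice */

        update_protect_slice(min_slice, &curr);
        printf("vprot clamped to vruntime + %llu = %llu\n",
               (unsigned long long)min_slice,
               (unsigned long long)curr.vprot);        /* prints 800 */
        return 0;
}
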
kernel/sched/fair.c

index 45e057fc23546dd26e96847b7abee5d66b438cc6..1660960d64afef4aca0ea567edc87f933f9598a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -889,13 +889,13 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
  * When run to parity is disabled, we give a minimum quantum to the running
  * entity to ensure progress.
  */
-static inline void set_protect_slice(struct sched_entity *se)
+static inline void set_protect_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        u64 slice = normalized_sysctl_sched_base_slice;
        u64 vprot = se->deadline;
 
        if (sched_feat(RUN_TO_PARITY))
-               slice = cfs_rq_min_slice(cfs_rq_of(se));
+               slice = cfs_rq_min_slice(cfs_rq);
 
        slice = min(slice, se->slice);
        if (slice != se->slice)
@@ -904,6 +904,13 @@ static inline void set_protect_slice(struct sched_entity *se)
        se->vprot = vprot;
 }
 
+static inline void update_protect_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       u64 slice = cfs_rq_min_slice(cfs_rq);
+
+       se->vprot = min_vruntime(se->vprot, se->vruntime + calc_delta_fair(slice, se));
+}
+
 static inline bool protect_slice(struct sched_entity *se)
 {
        return ((s64)(se->vprot - se->vruntime) > 0);
@@ -5467,7 +5474,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
                __dequeue_entity(cfs_rq, se);
                update_load_avg(cfs_rq, se, UPDATE_TG);
 
-               set_protect_slice(se);
+               set_protect_slice(cfs_rq, se);
        }
 
        update_stats_curr_start(cfs_rq, se);
@@ -8720,6 +8727,9 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
        if (__pick_eevdf(cfs_rq, !do_preempt_short) == pse)
                goto preempt;
 
+       if (sched_feat(RUN_TO_PARITY) && do_preempt_short)
+               update_protect_slice(cfs_rq, se);
+
        return;
 
 preempt:
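
[ An aside on the unchanged protect_slice() test shown above: a
  standalone demo (my illustration, not kernel code) of why the check
  uses a signed difference rather than a plain unsigned compare — it
  stays correct even when vruntime wraps around the u64 range. ]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* vruntime sits just below the u64 limit */
        uint64_t vruntime = UINT64_MAX - 100;
        uint64_t vprot = vruntime + 200;        /* wraps around to 99 */

        /* a naive unsigned compare concludes the protection expired */
        printf("naive compare:  %d\n", vprot > vruntime);                 /* 0 */
        /* the signed difference still sees 200 units of protection left */
        printf("signed compare: %d\n", (int64_t)(vprot - vruntime) > 0);  /* 1 */
        return 0;
}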