git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched/eevdf: Fix HRTICK duration
author Peter Zijlstra <peterz@infradead.org>
Tue, 24 Feb 2026 16:35:17 +0000 (17:35 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Fri, 27 Feb 2026 15:40:03 +0000 (16:40 +0100)
The nominal duration for an EEVDF task to run is until its deadline, at
which point the deadline is moved ahead and a new task selection is done.

Try and predict the time 'lost' to higher scheduling classes. Since this is
an estimate, the timer can be either early or late. In case it is early,
task_tick_fair() will take the !need_resched() path and restart the timer.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://patch.msgid.link/20260224163428.798198874@kernel.org
kernel/sched/fair.c

index eea99ec01a3fb25f2fc9f2a863c494c1f360e0a5..247fecd1ac41d4fb8639306f222ccd87730f5b99 100644 (file)
@@ -6735,21 +6735,37 @@ static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
+       unsigned long scale = 1024;
+       unsigned long util = 0;
+       u64 vdelta;
+       u64 delta;
 
        WARN_ON_ONCE(task_rq(p) != rq);
 
-       if (rq->cfs.h_nr_queued > 1) {
-               u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
-               u64 slice = se->slice;
-               s64 delta = slice - ran;
+       if (rq->cfs.h_nr_queued <= 1)
+               return;
 
-               if (delta < 0) {
-                       if (task_current_donor(rq, p))
-                               resched_curr(rq);
-                       return;
-               }
-               hrtick_start(rq, delta);
+       /*
+        * Compute time until virtual deadline
+        */
+       vdelta = se->deadline - se->vruntime;
+       if ((s64)vdelta < 0) {
+               if (task_current_donor(rq, p))
+                       resched_curr(rq);
+               return;
        }
+       delta = (se->load.weight * vdelta) / NICE_0_LOAD;
+
+       /*
+        * Correct for instantaneous load of other classes.
+        */
+       util += cpu_util_irq(rq);
+       if (util && util < 1024) {
+               scale *= 1024;
+               scale /= (1024 - util);
+       }
+
+       hrtick_start(rq, (scale * delta) / 1024);
 }
 
 /*
@@ -13365,11 +13381,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
                entity_tick(cfs_rq, se, queued);
        }
 
-       if (queued) {
-               if (!need_resched())
-                       hrtick_start_fair(rq, curr);
+       if (queued)
                return;
-       }
 
        if (static_branch_unlikely(&sched_numa_balancing))
                task_tick_numa(rq, curr);