sched/fair: Rename cfs_rq::avg_vruntime to ::sum_w_vruntime, and helper functions
author     Ingo Molnar <mingo@kernel.org>
           Tue, 2 Dec 2025 15:09:23 +0000 (16:09 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 15 Dec 2025 06:52:44 +0000 (07:52 +0100)
The ::avg_vruntime field is a misnomer: the name suggests an
'average vruntime', but in reality it's the momentary sum
of the weighted vruntimes of all queued tasks, which is
at least a division away from being an average.

This is clear from comments about the math of fair scheduling:

    * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime

This confusion is increased by the avg_vruntime() function,
which does perform the division and returns a true average.
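
Spelled out in the notation of the math comments in fair.c
(an illustrative sketch of the relationship, not code from
this patch):

    sum_w_vruntime = \Sum (v_i - v0) * w_i
    sum_weight     = \Sum w_i
    true average   = v0 + sum_w_vruntime / sum_weight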

The sum of all weighted vruntimes should be named accordingly,
so rename the field to ::sum_w_vruntime. (The more explicit
::sum_weighted_vruntime would arguably be a bit of a mouthful.)

Understanding the scheduler is hard enough already, without
extra layers of obfuscated naming. ;-)

Also rename the related helper functions to match:

  avg_vruntime_add()    => sum_w_vruntime_add()
  avg_vruntime_sub()    => sum_w_vruntime_sub()
  avg_vruntime_update() => sum_w_vruntime_update()

With the notable exception of avg_vruntime() itself, which
was named accurately and is left unchanged.
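
For reference, a minimal C sketch of where that one division
happens, modelled on the avg_vruntime() hunk below; the helper
name is hypothetical, and it omits the running entity's
contribution and the rounding fix-up that the real function has:

    /* Sketch only: avg_vruntime() also accounts for cfs_rq->curr. */
    static u64 avg_vruntime_sketch(struct cfs_rq *cfs_rq)
    {
            s64 avg   = cfs_rq->sum_w_vruntime;  /* \Sum (v_i - v0) * w_i */
            long load = cfs_rq->sum_weight;      /* \Sum w_i */

            if (load)
                    avg = div_s64(avg, load);    /* the one division */

            return cfs_rq->zero_vruntime + avg;  /* v0 + average offset */
    }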

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20251201064647.1851919-7-mingo@kernel.org
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 65b1065f9b21c09f6f2ad7cec0e2c6b739dafcc7..dcbd995de46d18bab25a75a9a107a90384cd47d9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -607,7 +607,7 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Which we track using:
  *
  *                    v0 := cfs_rq->zero_vruntime
- * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
+ * \Sum (v_i - v0) * w_i := cfs_rq->sum_w_vruntime
  *              \Sum w_i := cfs_rq->sum_weight
  *
  * Since zero_vruntime closely tracks the per-task service, these
@@ -619,32 +619,32 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * As measured, the max (key * weight) value was ~44 bits for a kernel build.
  */
 static void
-avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+sum_w_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        unsigned long weight = scale_load_down(se->load.weight);
        s64 key = entity_key(cfs_rq, se);
 
-       cfs_rq->avg_vruntime += key * weight;
+       cfs_rq->sum_w_vruntime += key * weight;
        cfs_rq->sum_weight += weight;
 }
 
 static void
-avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+sum_w_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        unsigned long weight = scale_load_down(se->load.weight);
        s64 key = entity_key(cfs_rq, se);
 
-       cfs_rq->avg_vruntime -= key * weight;
+       cfs_rq->sum_w_vruntime -= key * weight;
        cfs_rq->sum_weight -= weight;
 }
 
 static inline
-void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+void sum_w_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
 {
        /*
-        * v' = v + d ==> avg_vruntime' = avg_runtime - d*sum_weight
+        * v' = v + d ==> sum_w_vruntime' = sum_w_vruntime - d*sum_weight
         */
-       cfs_rq->avg_vruntime -= cfs_rq->sum_weight * delta;
+       cfs_rq->sum_w_vruntime -= cfs_rq->sum_weight * delta;
 }
 
 /*
@@ -654,7 +654,7 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
 u64 avg_vruntime(struct cfs_rq *cfs_rq)
 {
        struct sched_entity *curr = cfs_rq->curr;
-       s64 avg = cfs_rq->avg_vruntime;
+       s64 avg = cfs_rq->sum_w_vruntime;
        long load = cfs_rq->sum_weight;
 
        if (curr && curr->on_rq) {
@@ -722,7 +722,7 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
 {
        struct sched_entity *curr = cfs_rq->curr;
-       s64 avg = cfs_rq->avg_vruntime;
+       s64 avg = cfs_rq->sum_w_vruntime;
        long load = cfs_rq->sum_weight;
 
        if (curr && curr->on_rq) {
@@ -745,7 +745,7 @@ static void update_zero_vruntime(struct cfs_rq *cfs_rq)
        u64 vruntime = avg_vruntime(cfs_rq);
        s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);
 
-       avg_vruntime_update(cfs_rq, delta);
+       sum_w_vruntime_update(cfs_rq, delta);
 
        cfs_rq->zero_vruntime = vruntime;
 }
@@ -819,7 +819,7 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
  */
 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-       avg_vruntime_add(cfs_rq, se);
+       sum_w_vruntime_add(cfs_rq, se);
        update_zero_vruntime(cfs_rq);
        se->min_vruntime = se->vruntime;
        se->min_slice = se->slice;
@@ -831,7 +831,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
                                  &min_vruntime_cb);
-       avg_vruntime_sub(cfs_rq, se);
+       sum_w_vruntime_sub(cfs_rq, se);
        update_zero_vruntime(cfs_rq);
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3334aa53542367d311c6b126eedb0959b0336081..ab1bfa05e8941c3e58d7114cb7d91c0310360c1a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -678,7 +678,7 @@ struct cfs_rq {
        unsigned int            h_nr_runnable;          /* SCHED_{NORMAL,BATCH,IDLE} */
        unsigned int            h_nr_idle;              /* SCHED_IDLE */
 
-       s64                     avg_vruntime;
+       s64                     sum_w_vruntime;
        u64                     sum_weight;
 
        u64                     zero_vruntime;