git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched/fair: Use full weight to __calc_delta()
author: Peter Zijlstra <peterz@infradead.org>
Wed, 11 Feb 2026 16:07:58 +0000 (17:07 +0100)
committer: Peter Zijlstra <peterz@infradead.org>
Mon, 23 Feb 2026 17:04:10 +0000 (18:04 +0100)
Since we now use the full weight for avg_vruntime(), also make
__calc_delta() use the full value.

Since weight is effectively NICE_0_LOAD, this is 20 bits on 64bit.
This leaves 44 bits for delta_exec, which is ~16k seconds, way longer
than any one tick would ever be, so no worry about overflow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
Link: https://patch.msgid.link/20260219080625.183283814%40infradead.org
kernel/sched/fair.c

index 2b98054cd7548dde372985af51ba91f6e919b58d..23315c294da1589c7ddfa9a3c5b93cadcc08ebc8 100644 (file)
@@ -225,6 +225,7 @@ void __init sched_init_granularity(void)
        update_sysctl();
 }
 
+#ifndef CONFIG_64BIT
 #define WMULT_CONST    (~0U)
 #define WMULT_SHIFT    32
 
@@ -283,6 +284,12 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
 
        return mul_u64_u32_shr(delta_exec, fact, shift);
 }
+#else
+static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
+{
+       return (delta_exec * weight) / lw->weight;
+}
+#endif
 
 /*
  * delta /= w