sched/fair: Keep load_avg and load_sum synced
author    Vincent Guittot <vincent.guittot@linaro.org>
Thu, 27 May 2021 12:29:15 +0000 (14:29 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 16 Jun 2021 10:01:45 +0000 (12:01 +0200)
commit 7c7ad626d9a0ff0a36c1e2a3cfbbc6a13828d5eb upstream.

When removing a cfs_rq from the list we only check the _sum value, so we
must ensure that _avg and _sum stay synced; otherwise load_sum could end
up zero while load_avg is not, after propagating load in the cgroup
hierarchy.

Use load_avg to compute load_sum, as is already done for util_sum and
runnable_sum.

Fixes: 0e2d2aaaae52 ("sched/fair: Rewrite PELT migration propagation")
Reported-by: Odin Ugedal <odin@uged.al>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Odin Ugedal <odin@uged.al>
Link: https://lkml.kernel.org/r/20210527122916.27683-2-vincent.guittot@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
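
To make the failure mode concrete before the diff, here is a toy sketch
(plain C, not kernel code: the starting state, the deltas, and the helper
below are invented for illustration, and the helper only mimics the
kernel's add_positive() macro). Clamping _avg and _sum independently can
leave load_sum at zero while load_avg is still positive, so a _sum-only
check would treat the cfs_rq as fully decayed even though it still
carries load.

#include <stdio.h>

/* Mimics the kernel's add_positive(): apply a signed delta but never
 * let the value go below zero. */
static long add_positive(long cur, long delta)
{
	long res = cur + delta;
	return res < 0 ? 0 : res;
}

int main(void)
{
	long load_avg = 2, load_sum = 100;	/* contrived state */
	long delta_avg = -1, delta_sum = -150;	/* contrived deltas */

	/* Old scheme: each field drifts on its own delta. */
	load_avg = add_positive(load_avg, delta_avg);	/* -> 1 */
	load_sum = add_positive(load_sum, delta_sum);	/* clamped -> 0 */

	/* load_sum == 0 while load_avg == 1: desynced. */
	printf("load_avg=%ld load_sum=%ld\n", load_avg, load_sum);
	return 0;
}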
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1ad0e52487f6b40e355e7982d67a66f4a491cc77..f07a581ded9533923cc604c5491ac3e313c714a4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3501,10 +3501,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+       long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
        unsigned long load_avg;
        u64 load_sum = 0;
-       s64 delta_sum;
        u32 divider;
 
        if (!runnable_sum)
@@ -3551,13 +3550,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
        load_sum = (s64)se_weight(se) * runnable_sum;
        load_avg = div_s64(load_sum, divider);
 
-       delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
-       delta_avg = load_avg - se->avg.load_avg;
+       delta = load_avg - se->avg.load_avg;
 
        se->avg.load_sum = runnable_sum;
        se->avg.load_avg = load_avg;
-       add_positive(&cfs_rq->avg.load_avg, delta_avg);
-       add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+       add_positive(&cfs_rq->avg.load_avg, delta);
+       cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
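
For reference, a minimal stand-alone sketch of the sync pattern the patch
adopts: re-derive load_sum from load_avg times the PELT divider instead of
applying an independent delta. LOAD_AVG_MAX and the divider formula follow
kernel/sched/pelt.h; the struct is trimmed down and the values are
illustrative only.

#include <stdio.h>

#define LOAD_AVG_MAX 47742

/* Trimmed-down stand-in for the kernel's struct sched_avg. */
struct sched_avg {
	unsigned long load_avg;
	unsigned long long load_sum;
	unsigned int period_contrib;
};

/* Same formula as the kernel's get_pelt_divider(). */
static unsigned int get_pelt_divider(const struct sched_avg *avg)
{
	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}

int main(void)
{
	struct sched_avg avg = { .load_avg = 3, .period_contrib = 512 };

	/* The fix: compute load_sum from load_avg, so the pair can
	 * never disagree about being zero. */
	avg.load_sum = avg.load_avg * get_pelt_divider(&avg);

	printf("load_avg=%lu load_sum=%llu\n", avg.load_avg, avg.load_sum);
	return 0;
}

With this scheme, load_sum is zero exactly when load_avg is zero, which is
the invariant the _sum-based list-removal check relies on.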