# Source: git.ipfire.org — thirdparty/kernel/stable-queue.git (4.4-stable patches)
# File:   queue-4.4/sched-fair-fix-new-task-s-load-avg-removed-from-sour.patch
From 43ce7d3e79e8d11c3be7c1f925371e7a596df5f6 Mon Sep 17 00:00:00 2001
From: Yuyang Du <yuyang.du@intel.com>
Date: Thu, 17 Dec 2015 07:34:27 +0800
Subject: sched/fair: Fix new task's load avg removed from source CPU in
 wake_up_new_task()

[ Upstream commit 0905f04eb21fc1c2e690bed5d0418a061d56c225 ]

If a newly created task is selected to go to a different CPU in fork
balance when it wakes up the first time, its load averages should
not be removed from the source CPU since they are never added to
it before. The same is also applicable to a never used group entity.

Fix it in remove_entity_load_avg(): when entity's last_update_time
is 0, simply return. This should precisely identify the case in
question, because in other migrations, the last_update_time is set
to 0 after remove_entity_load_avg().

Reported-by: Steve Muckle <steve.muckle@linaro.org>
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
[peterz: cfs_rq_last_update_time]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <Juri.Lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: http://lkml.kernel.org/r/20151216233427.GJ28098@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/sched/fair.c | 38 ++++++++++++++++++++++++++++----------
 1 file changed, 28 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c2af250547bb..6051007918ad 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2841,27 +2841,45 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
 }
 
-/*
- * Task first catches up with cfs_rq, and then subtract
- * itself from the cfs_rq (task must be off the queue now).
- */
-void remove_entity_load_avg(struct sched_entity *se)
-{
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	u64 last_update_time;
-
 #ifndef CONFIG_64BIT
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
 	u64 last_update_time_copy;
+	u64 last_update_time;
 
 	do {
 		last_update_time_copy = cfs_rq->load_last_update_time_copy;
 		smp_rmb();
 		last_update_time = cfs_rq->avg.last_update_time;
 	} while (last_update_time != last_update_time_copy);
+
+	return last_update_time;
+}
 #else
-	last_update_time = cfs_rq->avg.last_update_time;
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->avg.last_update_time;
+}
 #endif
 
+/*
+ * Task first catches up with cfs_rq, and then subtract
+ * itself from the cfs_rq (task must be off the queue now).
+ */
+void remove_entity_load_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 last_update_time;
+
+	/*
+	 * Newly created task or never used group entity should not be removed
+	 * from its (source) cfs_rq
+	 */
+	if (se->avg.last_update_time == 0)
+		return;
+
+	last_update_time = cfs_rq_last_update_time(cfs_rq);
+
 	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
 	atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
 	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
-- 
2.19.1
