Commit | Line | Data |
---|---|---|
9547e94c SL |
1 | From 43ce7d3e79e8d11c3be7c1f925371e7a596df5f6 Mon Sep 17 00:00:00 2001 |
2 | From: Yuyang Du <yuyang.du@intel.com> | |
3 | Date: Thu, 17 Dec 2015 07:34:27 +0800 | |
4 | Subject: sched/fair: Fix new task's load avg removed from source CPU in | |
5 | wake_up_new_task() | |
6 | ||
7 | [ Upstream commit 0905f04eb21fc1c2e690bed5d0418a061d56c225 ] | |
8 | ||
9 | If a newly created task is selected to go to a different CPU in fork | |
10 | balance when it wakes up the first time, its load averages should | |
11 | not be removed from the source CPU since they are never added to | |
12 | it before. The same is also applicable to a never used group entity. | |
13 | ||
14 | Fix it in remove_entity_load_avg(): when entity's last_update_time | |
15 | is 0, simply return. This should precisely identify the case in | |
16 | question, because in other migrations, the last_update_time is set | |
17 | to 0 after remove_entity_load_avg(). | |
18 | ||
19 | Reported-by: Steve Muckle <steve.muckle@linaro.org> | |
20 | Signed-off-by: Yuyang Du <yuyang.du@intel.com> | |
21 | [peterz: cfs_rq_last_update_time] | |
22 | Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> | |
23 | Cc: Dietmar Eggemann <dietmar.eggemann@arm.com> | |
24 | Cc: Juri Lelli <Juri.Lelli@arm.com> | |
25 | Cc: Linus Torvalds <torvalds@linux-foundation.org> | |
26 | Cc: Mike Galbraith <efault@gmx.de> | |
27 | Cc: Morten Rasmussen <morten.rasmussen@arm.com> | |
28 | Cc: Patrick Bellasi <patrick.bellasi@arm.com> | |
29 | Cc: Peter Zijlstra <peterz@infradead.org> | |
30 | Cc: Thomas Gleixner <tglx@linutronix.de> | |
31 | Cc: Vincent Guittot <vincent.guittot@linaro.org> | |
32 | Link: http://lkml.kernel.org/r/20151216233427.GJ28098@intel.com | |
33 | Signed-off-by: Ingo Molnar <mingo@kernel.org> | |
34 | Signed-off-by: Sasha Levin <sashal@kernel.org> | |
35 | --- | |
36 | kernel/sched/fair.c | 38 ++++++++++++++++++++++++++++---------- | |
37 | 1 file changed, 28 insertions(+), 10 deletions(-) | |
38 | ||
39 | diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c | |
40 | index c2af250547bb..6051007918ad 100644 | |
41 | --- a/kernel/sched/fair.c | |
42 | +++ b/kernel/sched/fair.c | |
43 | @@ -2841,27 +2841,45 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) | |
44 | max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0); | |
45 | } | |
46 | ||
47 | -/* | |
48 | - * Task first catches up with cfs_rq, and then subtract | |
49 | - * itself from the cfs_rq (task must be off the queue now). | |
50 | - */ | |
51 | -void remove_entity_load_avg(struct sched_entity *se) | |
52 | -{ | |
53 | - struct cfs_rq *cfs_rq = cfs_rq_of(se); | |
54 | - u64 last_update_time; | |
55 | - | |
56 | #ifndef CONFIG_64BIT | |
57 | +static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) | |
58 | +{ | |
59 | u64 last_update_time_copy; | |
60 | + u64 last_update_time; | |
61 | ||
62 | do { | |
63 | last_update_time_copy = cfs_rq->load_last_update_time_copy; | |
64 | smp_rmb(); | |
65 | last_update_time = cfs_rq->avg.last_update_time; | |
66 | } while (last_update_time != last_update_time_copy); | |
67 | + | |
68 | + return last_update_time; | |
69 | +} | |
70 | #else | |
71 | - last_update_time = cfs_rq->avg.last_update_time; | |
72 | +static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) | |
73 | +{ | |
74 | + return cfs_rq->avg.last_update_time; | |
75 | +} | |
76 | #endif | |
77 | ||
78 | +/* | |
79 | + * Task first catches up with cfs_rq, and then subtract | |
80 | + * itself from the cfs_rq (task must be off the queue now). | |
81 | + */ | |
82 | +void remove_entity_load_avg(struct sched_entity *se) | |
83 | +{ | |
84 | + struct cfs_rq *cfs_rq = cfs_rq_of(se); | |
85 | + u64 last_update_time; | |
86 | + | |
87 | + /* | |
88 | + * Newly created task or never used group entity should not be removed | |
89 | + * from its (source) cfs_rq | |
90 | + */ | |
91 | + if (se->avg.last_update_time == 0) | |
92 | + return; | |
93 | + | |
94 | + last_update_time = cfs_rq_last_update_time(cfs_rq); | |
95 | + | |
96 | __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL); | |
97 | atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg); | |
98 | atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg); | |
99 | -- | |
100 | 2.19.1 | |
101 |