Git-commit: 2cb7cef9
Subject: sched: rework wakeup preemption
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
References: 437171 - LTC47404


Rework the wakeup preemption to work on real runtime instead of
the virtual runtime. This greatly simplifies the code.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Signed-off-by: Olaf Hering <olh@suse.de>
---
 kernel/sched_fair.c |  133 +---------------------------------------------------
 1 file changed, 4 insertions(+), 129 deletions(-)

--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -407,64 +407,6 @@ static u64 sched_vslice(struct cfs_rq *c
 }
 
 /*
- * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
- * that it favours >=0 over <0.
- *
- *   -20         |
- *               |
- *     0 --------+-------
- *             .'
- *    19     .'
- *
- */
-static unsigned long
-calc_delta_asym(unsigned long delta, struct sched_entity *se)
-{
-	struct load_weight lw = {
-		.weight = NICE_0_LOAD,
-		.inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
-	};
-
-	for_each_sched_entity(se) {
-		struct load_weight *se_lw = &se->load;
-		unsigned long rw = cfs_rq_of(se)->load.weight;
-
-#ifdef CONFIG_FAIR_SCHED_GROUP
-		struct cfs_rq *cfs_rq = se->my_q;
-		struct task_group *tg = NULL
-
-		if (cfs_rq)
-			tg = cfs_rq->tg;
-
-		if (tg && tg->shares < NICE_0_LOAD) {
-			/*
-			 * scale shares to what it would have been had
-			 * tg->weight been NICE_0_LOAD:
-			 *
-			 *   weight = 1024 * shares / tg->weight
-			 */
-			lw.weight *= se->load.weight;
-			lw.weight /= tg->shares;
-
-			lw.inv_weight = 0;
-
-			se_lw = &lw;
-			rw += lw.weight - se->load.weight;
-		} else
-#endif
-
-		if (se->load.weight < NICE_0_LOAD) {
-			se_lw = &lw;
-			rw += NICE_0_LOAD - se->load.weight;
-		}
-
-		delta = calc_delta_mine(delta, rw, se_lw);
-	}
-
-	return delta;
-}
-
-/*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
  */
@@ -1279,54 +1221,12 @@ static unsigned long wakeup_gran(struct
 	 * + nice tasks.
 	 */
 	if (sched_feat(ASYM_GRAN))
-		gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
-	else
-		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+		gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load);
 
 	return gran;
 }
 
 /*
- * Should 'se' preempt 'curr'.
- *
- *             |s1
- *        |s2
- *   |s3
- *         g
- *      |<--->|c
- *
- *  w(c, s1) = -1
- *  w(c, s2) =  0
- *  w(c, s3) =  1
- *
- */
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
-{
-	s64 gran, vdiff = curr->vruntime - se->vruntime;
-
-	if (vdiff < 0)
-		return -1;
-
-	gran = wakeup_gran(curr);
-	if (vdiff > gran)
-		return 1;
-
-	return 0;
-}
-
-/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
-	int depth = 0;
-
-	for_each_sched_entity(se)
-		depth++;
-
-	return depth;
-}
-
-/*
  * Preempt the current task with a newly woken task if needed:
  */
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
@@ -1334,7 +1234,7 @@ static void check_preempt_wakeup(struct
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
-	int se_depth, pse_depth;
+	s64 delta_exec;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -1358,33 +1258,8 @@ static void check_preempt_wakeup(struct
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	/*
-	 * preemption test can be made between sibling entities who are in the
-	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
-	 * both tasks until we find their ancestors who are siblings of common
-	 * parent.
-	 */
-
-	/* First walk up until both entities are at same depth */
-	se_depth = depth_se(se);
-	pse_depth = depth_se(pse);
-
-	while (se_depth > pse_depth) {
-		se_depth--;
-		se = parent_entity(se);
-	}
-
-	while (pse_depth > se_depth) {
-		pse_depth--;
-		pse = parent_entity(pse);
-	}
-
-	while (!is_same_group(se, pse)) {
-		se = parent_entity(se);
-		pse = parent_entity(pse);
-	}
-
-	if (wakeup_preempt_entity(se, pse) == 1)
+	delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+	if (delta_exec > wakeup_gran(pse))
 		resched_task(curr);
 }
 