Subject: sched: re-instate vruntime based wakeup preemption
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
References: 437171 - LTC47404

The advantage is that vruntime based wakeup preemption has a better
conceptual model. Here wakeup_gran = 0 means: preempt when 'fair'.
Therefore wakeup_gran is the granularity of unfairness we allow in order
to make progress.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Olaf Hering <olh@suse.de>
---
 kernel/sched_fair.c |   98 ++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 92 insertions(+), 6 deletions(-)

--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -141,6 +141,49 @@ static inline struct sched_entity *paren
 	return se->parent;
 }
 
+/* return depth at which a sched entity is present in the hierarchy */
+static inline int depth_se(struct sched_entity *se)
+{
+	int depth = 0;
+
+	for_each_sched_entity(se)
+		depth++;
+
+	return depth;
+}
+
+static void
+find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+{
+	int se_depth, pse_depth;
+
+	/*
+	 * preemption test can be made between sibling entities who are in the
+	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
+	 * both tasks until we find their ancestors who are siblings of common
+	 * parent.
+	 */
+
+	/* First walk up until both entities are at same depth */
+	se_depth = depth_se(*se);
+	pse_depth = depth_se(*pse);
+
+	while (se_depth > pse_depth) {
+		se_depth--;
+		*se = parent_entity(*se);
+	}
+
+	while (pse_depth > se_depth) {
+		pse_depth--;
+		*pse = parent_entity(*pse);
+	}
+
+	while (!is_same_group(*se, *pse)) {
+		*se = parent_entity(*se);
+		*pse = parent_entity(*pse);
+	}
+}
+
 #else /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -191,6 +234,11 @@ static inline struct sched_entity *paren
 	return NULL;
 }
 
+static inline void
+find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+{
+}
+
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 
@@ -1220,13 +1268,42 @@ static unsigned long wakeup_gran(struct
 	 * More easily preempt - nice tasks, while not making it harder for
 	 * + nice tasks.
 	 */
-	if (sched_feat(ASYM_GRAN))
-		gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load);
+	if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
+		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
 
 	return gran;
 }
 
 /*
+ * Should 'se' preempt 'curr'.
+ *
+ *             |s1
+ *        |s2
+ *   |s3
+ *         g
+ *      |<--->|c
+ *
+ *  w(c, s1) = -1
+ *  w(c, s2) =  0
+ *  w(c, s3) =  1
+ *
+ */
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+	s64 gran, vdiff = curr->vruntime - se->vruntime;
+
+	if (vdiff <= 0)
+		return -1;
+
+	gran = wakeup_gran(curr);
+	if (vdiff > gran)
+		return 1;
+
+	return 0;
+}
+
+/*
  * Preempt the current task with a newly woken task if needed:
  */
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
@@ -1234,7 +1311,6 @@ static void check_preempt_wakeup(struct
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
-	s64 delta_exec;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -1258,9 +1334,19 @@ static void check_preempt_wakeup(struct
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime;
-	if (delta_exec > wakeup_gran(pse))
-		resched_task(curr);
+	find_matching_se(&se, &pse);
+
+	while (se) {
+		BUG_ON(!pse);
+
+		if (wakeup_preempt_entity(se, pse) == 1) {
+			resched_task(curr);
+			break;
+		}
+
+		se = parent_entity(se);
+		pse = parent_entity(pse);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)