1 /*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9
10 int sched_rr_timeslice = RR_TIMESLICE;
11
12 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13
14 struct rt_bandwidth def_rt_bandwidth;
15
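/*
 * Replenishment timer callback: fires once per rt_period. Forward the timer
 * past 'now' and let do_sched_rt_period_timer() refill runtime and unthrottle
 * the per-CPU rt_rqs; stop the timer once everything is idle.
 */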
16 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
17 {
18 struct rt_bandwidth *rt_b =
19 container_of(timer, struct rt_bandwidth, rt_period_timer);
20 ktime_t now;
21 int overrun;
22 int idle = 0;
23
24 for (;;) {
25 now = hrtimer_cb_get_time(timer);
26 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
27
28 if (!overrun)
29 break;
30
31 idle = do_sched_rt_period_timer(rt_b, overrun);
32 }
33
34 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35 }
36
37 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
38 {
39 rt_b->rt_period = ns_to_ktime(period);
40 rt_b->rt_runtime = runtime;
41
42 raw_spin_lock_init(&rt_b->rt_runtime_lock);
43
44 hrtimer_init(&rt_b->rt_period_timer,
45 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
46 rt_b->rt_period_timer.function = sched_rt_period_timer;
47 }
48
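/*
 * Arm the bandwidth replenishment timer, but only when RT bandwidth control
 * is enabled and the runtime is not infinite.
 */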
49 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
50 {
51 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52 return;
53
54 if (hrtimer_active(&rt_b->rt_period_timer))
55 return;
56
57 raw_spin_lock(&rt_b->rt_runtime_lock);
58 start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
59 raw_spin_unlock(&rt_b->rt_runtime_lock);
60 }
61
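/* Set up an rt_rq: priority array, SMP push/pull state and bandwidth fields. */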
62 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
63 {
64 struct rt_prio_array *array;
65 int i;
66
67 array = &rt_rq->active;
68 for (i = 0; i < MAX_RT_PRIO; i++) {
69 INIT_LIST_HEAD(array->queue + i);
70 __clear_bit(i, array->bitmap);
71 }
72 /* delimiter for bitsearch: */
73 __set_bit(MAX_RT_PRIO, array->bitmap);
74
75 #if defined CONFIG_SMP
76 rt_rq->highest_prio.curr = MAX_RT_PRIO;
77 rt_rq->highest_prio.next = MAX_RT_PRIO;
78 rt_rq->rt_nr_migratory = 0;
79 rt_rq->overloaded = 0;
80 plist_head_init(&rt_rq->pushable_tasks);
81 #endif
82 /* We start in dequeued state, because no RT tasks are queued */
83 rt_rq->rt_queued = 0;
84
85 rt_rq->rt_time = 0;
86 rt_rq->rt_throttled = 0;
87 rt_rq->rt_runtime = 0;
88 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
89 }
90
91 #ifdef CONFIG_RT_GROUP_SCHED
92 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
93 {
94 hrtimer_cancel(&rt_b->rt_period_timer);
95 }
96
97 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
98
99 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
100 {
101 #ifdef CONFIG_SCHED_DEBUG
102 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
103 #endif
104 return container_of(rt_se, struct task_struct, rt);
105 }
106
107 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
108 {
109 return rt_rq->rq;
110 }
111
112 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
113 {
114 return rt_se->rt_rq;
115 }
116
117 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
118 {
119 struct rt_rq *rt_rq = rt_se->rt_rq;
120
121 return rt_rq->rq;
122 }
123
124 void free_rt_sched_group(struct task_group *tg)
125 {
126 int i;
127
128 if (tg->rt_se)
129 destroy_rt_bandwidth(&tg->rt_bandwidth);
130
131 for_each_possible_cpu(i) {
132 if (tg->rt_rq)
133 kfree(tg->rt_rq[i]);
134 if (tg->rt_se)
135 kfree(tg->rt_se[i]);
136 }
137
138 kfree(tg->rt_rq);
139 kfree(tg->rt_se);
140 }
141
142 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
143 struct sched_rt_entity *rt_se, int cpu,
144 struct sched_rt_entity *parent)
145 {
146 struct rq *rq = cpu_rq(cpu);
147
148 rt_rq->highest_prio.curr = MAX_RT_PRIO;
149 rt_rq->rt_nr_boosted = 0;
150 rt_rq->rq = rq;
151 rt_rq->tg = tg;
152
153 tg->rt_rq[cpu] = rt_rq;
154 tg->rt_se[cpu] = rt_se;
155
156 if (!rt_se)
157 return;
158
159 if (!parent)
160 rt_se->rt_rq = &rq->rt;
161 else
162 rt_se->rt_rq = parent->my_q;
163
164 rt_se->my_q = rt_rq;
165 rt_se->parent = parent;
166 INIT_LIST_HEAD(&rt_se->run_list);
167 }
168
169 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
170 {
171 struct rt_rq *rt_rq;
172 struct sched_rt_entity *rt_se;
173 int i;
174
175 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
176 if (!tg->rt_rq)
177 goto err;
178 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
179 if (!tg->rt_se)
180 goto err;
181
182 init_rt_bandwidth(&tg->rt_bandwidth,
183 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
184
185 for_each_possible_cpu(i) {
186 rt_rq = kzalloc_node(sizeof(struct rt_rq),
187 GFP_KERNEL, cpu_to_node(i));
188 if (!rt_rq)
189 goto err;
190
191 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
192 GFP_KERNEL, cpu_to_node(i));
193 if (!rt_se)
194 goto err_free_rq;
195
196 init_rt_rq(rt_rq, cpu_rq(i));
197 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
198 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
199 }
200
201 return 1;
202
203 err_free_rq:
204 kfree(rt_rq);
205 err:
206 return 0;
207 }
208
209 #else /* CONFIG_RT_GROUP_SCHED */
210
211 #define rt_entity_is_task(rt_se) (1)
212
213 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
214 {
215 return container_of(rt_se, struct task_struct, rt);
216 }
217
218 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
219 {
220 return container_of(rt_rq, struct rq, rt);
221 }
222
223 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
224 {
225 struct task_struct *p = rt_task_of(rt_se);
226
227 return task_rq(p);
228 }
229
230 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
231 {
232 struct rq *rq = rq_of_rt_se(rt_se);
233
234 return &rq->rt;
235 }
236
237 void free_rt_sched_group(struct task_group *tg) { }
238
239 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
240 {
241 return 1;
242 }
243 #endif /* CONFIG_RT_GROUP_SCHED */
244
245 #ifdef CONFIG_SMP
246
247 static int pull_rt_task(struct rq *this_rq);
248
249 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
250 {
251 /* Try to pull RT tasks here if we lower this rq's prio */
252 return rq->rt.highest_prio.curr > prev->prio;
253 }
254
255 static inline int rt_overloaded(struct rq *rq)
256 {
257 return atomic_read(&rq->rd->rto_count);
258 }
259
260 static inline void rt_set_overload(struct rq *rq)
261 {
262 if (!rq->online)
263 return;
264
265 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
266 /*
267 * Make sure the mask is visible before we set
268 * the overload count. That is checked to determine
269 * if we should look at the mask. It would be a shame
270 * if we looked at the mask, but the mask was not
271 * updated yet.
272 *
273 * Matched by the barrier in pull_rt_task().
274 */
275 smp_wmb();
276 atomic_inc(&rq->rd->rto_count);
277 }
278
279 static inline void rt_clear_overload(struct rq *rq)
280 {
281 if (!rq->online)
282 return;
283
284 /* the order here really doesn't matter */
285 atomic_dec(&rq->rd->rto_count);
286 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
287 }
288
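/*
 * An rq is "overloaded" when it has more than one RT task queued and at
 * least one of them can migrate; keep rd->rto_mask and rto_count in sync.
 */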
289 static void update_rt_migration(struct rt_rq *rt_rq)
290 {
291 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
292 if (!rt_rq->overloaded) {
293 rt_set_overload(rq_of_rt_rq(rt_rq));
294 rt_rq->overloaded = 1;
295 }
296 } else if (rt_rq->overloaded) {
297 rt_clear_overload(rq_of_rt_rq(rt_rq));
298 rt_rq->overloaded = 0;
299 }
300 }
301
302 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
303 {
304 struct task_struct *p;
305
306 if (!rt_entity_is_task(rt_se))
307 return;
308
309 p = rt_task_of(rt_se);
310 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
311
312 rt_rq->rt_nr_total++;
313 if (p->nr_cpus_allowed > 1)
314 rt_rq->rt_nr_migratory++;
315
316 update_rt_migration(rt_rq);
317 }
318
319 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
320 {
321 struct task_struct *p;
322
323 if (!rt_entity_is_task(rt_se))
324 return;
325
326 p = rt_task_of(rt_se);
327 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
328
329 rt_rq->rt_nr_total--;
330 if (p->nr_cpus_allowed > 1)
331 rt_rq->rt_nr_migratory--;
332
333 update_rt_migration(rt_rq);
334 }
335
336 static inline int has_pushable_tasks(struct rq *rq)
337 {
338 return !plist_head_empty(&rq->rt.pushable_tasks);
339 }
340
341 static inline void set_post_schedule(struct rq *rq)
342 {
343 /*
344 * We detect this state here so that we can avoid taking the RQ
345 * lock again later if there is no need to push
346 */
347 rq->post_schedule = has_pushable_tasks(rq);
348 }
349
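/*
 * Track tasks that could be pushed to another CPU in a prio-ordered plist,
 * and cache the priority of the next-highest pushable task.
 */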
350 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
351 {
352 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
353 plist_node_init(&p->pushable_tasks, p->prio);
354 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
355
356 /* Update the highest prio pushable task */
357 if (p->prio < rq->rt.highest_prio.next)
358 rq->rt.highest_prio.next = p->prio;
359 }
360
361 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
362 {
363 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
364
365 /* Update the new highest prio pushable task */
366 if (has_pushable_tasks(rq)) {
367 p = plist_first_entry(&rq->rt.pushable_tasks,
368 struct task_struct, pushable_tasks);
369 rq->rt.highest_prio.next = p->prio;
370 } else
371 rq->rt.highest_prio.next = MAX_RT_PRIO;
372 }
373
374 #else
375
376 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
377 {
378 }
379
380 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
381 {
382 }
383
384 static inline
385 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
386 {
387 }
388
389 static inline
390 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
391 {
392 }
393
394 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
395 {
396 return false;
397 }
398
399 static inline int pull_rt_task(struct rq *this_rq)
400 {
401 return 0;
402 }
403
404 static inline void set_post_schedule(struct rq *rq)
405 {
406 }
407 #endif /* CONFIG_SMP */
408
409 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
410 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
411
412 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
413 {
414 return !list_empty(&rt_se->run_list);
415 }
416
417 #ifdef CONFIG_RT_GROUP_SCHED
418
419 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
420 {
421 if (!rt_rq->tg)
422 return RUNTIME_INF;
423
424 return rt_rq->rt_runtime;
425 }
426
427 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
428 {
429 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
430 }
431
432 typedef struct task_group *rt_rq_iter_t;
433
434 static inline struct task_group *next_task_group(struct task_group *tg)
435 {
436 do {
437 tg = list_entry_rcu(tg->list.next,
438 typeof(struct task_group), list);
439 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
440
441 if (&tg->list == &task_groups)
442 tg = NULL;
443
444 return tg;
445 }
446
447 #define for_each_rt_rq(rt_rq, iter, rq) \
448 for (iter = container_of(&task_groups, typeof(*iter), list); \
449 (iter = next_task_group(iter)) && \
450 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
451
452 #define for_each_sched_rt_entity(rt_se) \
453 for (; rt_se; rt_se = rt_se->parent)
454
455 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
456 {
457 return rt_se->my_q;
458 }
459
460 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
461 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
462
463 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
464 {
465 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
466 struct rq *rq = rq_of_rt_rq(rt_rq);
467 struct sched_rt_entity *rt_se;
468
469 int cpu = cpu_of(rq);
470
471 rt_se = rt_rq->tg->rt_se[cpu];
472
473 if (rt_rq->rt_nr_running) {
474 if (!rt_se)
475 enqueue_top_rt_rq(rt_rq);
476 else if (!on_rt_rq(rt_se))
477 enqueue_rt_entity(rt_se, false);
478
479 if (rt_rq->highest_prio.curr < curr->prio)
480 resched_curr(rq);
481 }
482 }
483
484 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
485 {
486 struct sched_rt_entity *rt_se;
487 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
488
489 rt_se = rt_rq->tg->rt_se[cpu];
490
491 if (!rt_se)
492 dequeue_top_rt_rq(rt_rq);
493 else if (on_rt_rq(rt_se))
494 dequeue_rt_entity(rt_se);
495 }
496
497 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
498 {
499 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
500 }
501
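/* An entity is "boosted" when it runs above its normal priority due to PI. */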
502 static int rt_se_boosted(struct sched_rt_entity *rt_se)
503 {
504 struct rt_rq *rt_rq = group_rt_rq(rt_se);
505 struct task_struct *p;
506
507 if (rt_rq)
508 return !!rt_rq->rt_nr_boosted;
509
510 p = rt_task_of(rt_se);
511 return p->prio != p->normal_prio;
512 }
513
514 #ifdef CONFIG_SMP
515 static inline const struct cpumask *sched_rt_period_mask(void)
516 {
517 return this_rq()->rd->span;
518 }
519 #else
520 static inline const struct cpumask *sched_rt_period_mask(void)
521 {
522 return cpu_online_mask;
523 }
524 #endif
525
526 static inline
527 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
528 {
529 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
530 }
531
532 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
533 {
534 return &rt_rq->tg->rt_bandwidth;
535 }
536
537 #else /* !CONFIG_RT_GROUP_SCHED */
538
539 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
540 {
541 return rt_rq->rt_runtime;
542 }
543
544 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
545 {
546 return ktime_to_ns(def_rt_bandwidth.rt_period);
547 }
548
549 typedef struct rt_rq *rt_rq_iter_t;
550
551 #define for_each_rt_rq(rt_rq, iter, rq) \
552 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
553
554 #define for_each_sched_rt_entity(rt_se) \
555 for (; rt_se; rt_se = NULL)
556
557 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
558 {
559 return NULL;
560 }
561
562 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
563 {
564 struct rq *rq = rq_of_rt_rq(rt_rq);
565
566 if (!rt_rq->rt_nr_running)
567 return;
568
569 enqueue_top_rt_rq(rt_rq);
570 resched_curr(rq);
571 }
572
573 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
574 {
575 dequeue_top_rt_rq(rt_rq);
576 }
577
578 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
579 {
580 return rt_rq->rt_throttled;
581 }
582
583 static inline const struct cpumask *sched_rt_period_mask(void)
584 {
585 return cpu_online_mask;
586 }
587
588 static inline
589 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
590 {
591 return &cpu_rq(cpu)->rt;
592 }
593
594 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
595 {
596 return &def_rt_bandwidth;
597 }
598
599 #endif /* CONFIG_RT_GROUP_SCHED */
600
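/*
 * Return true if RT runtime should still be accounted: either the period
 * timer is active or the rt_rq has not yet consumed its runtime.
 */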
601 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
602 {
603 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
604
605 return (hrtimer_active(&rt_b->rt_period_timer) ||
606 rt_rq->rt_time < rt_b->rt_runtime);
607 }
608
609 #ifdef CONFIG_SMP
610 /*
611 * We ran out of runtime, see if we can borrow some from our neighbours.
612 */
613 static int do_balance_runtime(struct rt_rq *rt_rq)
614 {
615 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
616 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
617 int i, weight, more = 0;
618 u64 rt_period;
619
620 weight = cpumask_weight(rd->span);
621
622 raw_spin_lock(&rt_b->rt_runtime_lock);
623 rt_period = ktime_to_ns(rt_b->rt_period);
624 for_each_cpu(i, rd->span) {
625 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
626 s64 diff;
627
628 if (iter == rt_rq)
629 continue;
630
631 raw_spin_lock(&iter->rt_runtime_lock);
632 /*
633 * Either all rqs have inf runtime and there's nothing to steal
634 * or __disable_runtime() below sets a specific rq to inf to
634 * indicate it's been disabled and disallow stealing.
636 */
637 if (iter->rt_runtime == RUNTIME_INF)
638 goto next;
639
640 /*
641 * From runqueues with spare time, take 1/n part of their
642 * spare time, but no more than our period.
643 */
644 diff = iter->rt_runtime - iter->rt_time;
645 if (diff > 0) {
646 diff = div_u64((u64)diff, weight);
647 if (rt_rq->rt_runtime + diff > rt_period)
648 diff = rt_period - rt_rq->rt_runtime;
649 iter->rt_runtime -= diff;
650 rt_rq->rt_runtime += diff;
651 more = 1;
652 if (rt_rq->rt_runtime == rt_period) {
653 raw_spin_unlock(&iter->rt_runtime_lock);
654 break;
655 }
656 }
657 next:
658 raw_spin_unlock(&iter->rt_runtime_lock);
659 }
660 raw_spin_unlock(&rt_b->rt_runtime_lock);
661
662 return more;
663 }
664
665 /*
666 * Ensure this RQ takes back all the runtime it lent to its neighbours.
667 */
668 static void __disable_runtime(struct rq *rq)
669 {
670 struct root_domain *rd = rq->rd;
671 rt_rq_iter_t iter;
672 struct rt_rq *rt_rq;
673
674 if (unlikely(!scheduler_running))
675 return;
676
677 for_each_rt_rq(rt_rq, iter, rq) {
678 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
679 s64 want;
680 int i;
681
682 raw_spin_lock(&rt_b->rt_runtime_lock);
683 raw_spin_lock(&rt_rq->rt_runtime_lock);
684 /*
685 * Either we're all inf and nobody needs to borrow, or we're
686 * already disabled and thus have nothing to do, or we have
687 * exactly the right amount of runtime to take out.
688 */
689 if (rt_rq->rt_runtime == RUNTIME_INF ||
690 rt_rq->rt_runtime == rt_b->rt_runtime)
691 goto balanced;
692 raw_spin_unlock(&rt_rq->rt_runtime_lock);
693
694 /*
695 * Calculate the difference between what we started out with
696 * and what we currently have; that's the amount of runtime
697 * we lent and now have to reclaim.
698 */
699 want = rt_b->rt_runtime - rt_rq->rt_runtime;
700
701 /*
702 * Greedy reclaim, take back as much as we can.
703 */
704 for_each_cpu(i, rd->span) {
705 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
706 s64 diff;
707
708 /*
709 * Can't reclaim from ourselves or disabled runqueues.
710 */
711 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
712 continue;
713
714 raw_spin_lock(&iter->rt_runtime_lock);
715 if (want > 0) {
716 diff = min_t(s64, iter->rt_runtime, want);
717 iter->rt_runtime -= diff;
718 want -= diff;
719 } else {
720 iter->rt_runtime -= want;
721 want -= want;
722 }
723 raw_spin_unlock(&iter->rt_runtime_lock);
724
725 if (!want)
726 break;
727 }
728
729 raw_spin_lock(&rt_rq->rt_runtime_lock);
730 /*
731 * We cannot be left wanting - that would mean some runtime
732 * leaked out of the system.
733 */
734 BUG_ON(want);
735 balanced:
736 /*
737 * Disable all the borrow logic by pretending we have inf
738 * runtime - in which case borrowing doesn't make sense.
739 */
740 rt_rq->rt_runtime = RUNTIME_INF;
741 rt_rq->rt_throttled = 0;
742 raw_spin_unlock(&rt_rq->rt_runtime_lock);
743 raw_spin_unlock(&rt_b->rt_runtime_lock);
744
745 /* Make rt_rq available for pick_next_task() */
746 sched_rt_rq_enqueue(rt_rq);
747 }
748 }
749
750 static void __enable_runtime(struct rq *rq)
751 {
752 rt_rq_iter_t iter;
753 struct rt_rq *rt_rq;
754
755 if (unlikely(!scheduler_running))
756 return;
757
758 /*
759 * Reset each runqueue's bandwidth settings
760 */
761 for_each_rt_rq(rt_rq, iter, rq) {
762 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
763
764 raw_spin_lock(&rt_b->rt_runtime_lock);
765 raw_spin_lock(&rt_rq->rt_runtime_lock);
766 rt_rq->rt_runtime = rt_b->rt_runtime;
767 rt_rq->rt_time = 0;
768 rt_rq->rt_throttled = 0;
769 raw_spin_unlock(&rt_rq->rt_runtime_lock);
770 raw_spin_unlock(&rt_b->rt_runtime_lock);
771 }
772 }
773
774 static int balance_runtime(struct rt_rq *rt_rq)
775 {
776 int more = 0;
777
778 if (!sched_feat(RT_RUNTIME_SHARE))
779 return more;
780
781 if (rt_rq->rt_time > rt_rq->rt_runtime) {
782 raw_spin_unlock(&rt_rq->rt_runtime_lock);
783 more = do_balance_runtime(rt_rq);
784 raw_spin_lock(&rt_rq->rt_runtime_lock);
785 }
786
787 return more;
788 }
789 #else /* !CONFIG_SMP */
790 static inline int balance_runtime(struct rt_rq *rt_rq)
791 {
792 return 0;
793 }
794 #endif /* CONFIG_SMP */
795
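/*
 * Called from the period timer for every CPU in the bandwidth domain:
 * reduce the accumulated rt_time by one period's worth of runtime and
 * unthrottle/re-enqueue rt_rqs that are back below their budget.
 * Returns 1 when all rt_rqs are idle so the timer can stop.
 */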
796 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
797 {
798 int i, idle = 1, throttled = 0;
799 const struct cpumask *span;
800
801 span = sched_rt_period_mask();
802 #ifdef CONFIG_RT_GROUP_SCHED
803 /*
804 * FIXME: isolated CPUs should really leave the root task group,
805 * whether they are isolcpus or were isolated via cpusets, lest
806 * the timer run on a CPU which does not service all runqueues,
807 * potentially leaving other CPUs indefinitely throttled. If
808 * isolation is really required, the user will turn the throttle
809 * off to kill the perturbations it causes anyway. Meanwhile,
810 * this maintains functionality for boot and/or troubleshooting.
811 */
812 if (rt_b == &root_task_group.rt_bandwidth)
813 span = cpu_online_mask;
814 #endif
815 for_each_cpu(i, span) {
816 int enqueue = 0;
817 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
818 struct rq *rq = rq_of_rt_rq(rt_rq);
819
820 raw_spin_lock(&rq->lock);
821 if (rt_rq->rt_time) {
822 u64 runtime;
823
824 raw_spin_lock(&rt_rq->rt_runtime_lock);
825 if (rt_rq->rt_throttled)
826 balance_runtime(rt_rq);
827 runtime = rt_rq->rt_runtime;
828 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
829 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
830 rt_rq->rt_throttled = 0;
831 enqueue = 1;
832
833 /*
834 * Force a clock update if the CPU was idle,
835 * lest wakeup -> unthrottle time accumulate.
836 */
837 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
838 rq->skip_clock_update = -1;
839 }
840 if (rt_rq->rt_time || rt_rq->rt_nr_running)
841 idle = 0;
842 raw_spin_unlock(&rt_rq->rt_runtime_lock);
843 } else if (rt_rq->rt_nr_running) {
844 idle = 0;
845 if (!rt_rq_throttled(rt_rq))
846 enqueue = 1;
847 }
848 if (rt_rq->rt_throttled)
849 throttled = 1;
850
851 if (enqueue)
852 sched_rt_rq_enqueue(rt_rq);
853 raw_spin_unlock(&rq->lock);
854 }
855
856 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
857 return 1;
858
859 return idle;
860 }
861
862 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
863 {
864 #ifdef CONFIG_RT_GROUP_SCHED
865 struct rt_rq *rt_rq = group_rt_rq(rt_se);
866
867 if (rt_rq)
868 return rt_rq->highest_prio.curr;
869 #endif
870
871 return rt_task_of(rt_se)->prio;
872 }
873
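/*
 * After trying to borrow runtime from other CPUs, decide whether this rt_rq
 * has exceeded its budget and must be throttled (dequeued) until the next
 * replenishment. Returns 1 when the caller should resched.
 */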
874 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
875 {
876 u64 runtime = sched_rt_runtime(rt_rq);
877
878 if (rt_rq->rt_throttled)
879 return rt_rq_throttled(rt_rq);
880
881 if (runtime >= sched_rt_period(rt_rq))
882 return 0;
883
884 balance_runtime(rt_rq);
885 runtime = sched_rt_runtime(rt_rq);
886 if (runtime == RUNTIME_INF)
887 return 0;
888
889 if (rt_rq->rt_time > runtime) {
890 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
891
892 /*
893 * Don't actually throttle groups that have no runtime assigned
894 * but accrue some time due to boosting.
895 */
896 if (likely(rt_b->rt_runtime)) {
897 rt_rq->rt_throttled = 1;
898 printk_deferred_once("sched: RT throttling activated\n");
899 } else {
900 /*
901 * In case we did anyway, make it go away;
902 * replenishment would be pointless, since it would replenish us
903 * with exactly 0 ns.
904 */
905 rt_rq->rt_time = 0;
906 }
907
908 if (rt_rq_throttled(rt_rq)) {
909 sched_rt_rq_dequeue(rt_rq);
910 return 1;
911 }
912 }
913
914 return 0;
915 }
916
917 /*
918 * Update the current task's runtime statistics. Skip current tasks that
919 * are not in our scheduling class.
920 */
921 static void update_curr_rt(struct rq *rq)
922 {
923 struct task_struct *curr = rq->curr;
924 struct sched_rt_entity *rt_se = &curr->rt;
925 u64 delta_exec;
926
927 if (curr->sched_class != &rt_sched_class)
928 return;
929
930 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
931 if (unlikely((s64)delta_exec <= 0))
932 return;
933
934 schedstat_set(curr->se.statistics.exec_max,
935 max(curr->se.statistics.exec_max, delta_exec));
936
937 curr->se.sum_exec_runtime += delta_exec;
938 account_group_exec_runtime(curr, delta_exec);
939
940 curr->se.exec_start = rq_clock_task(rq);
941 cpuacct_charge(curr, delta_exec);
942
943 sched_rt_avg_update(rq, delta_exec);
944
945 if (!rt_bandwidth_enabled())
946 return;
947
948 for_each_sched_rt_entity(rt_se) {
949 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
950
951 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
952 raw_spin_lock(&rt_rq->rt_runtime_lock);
953 rt_rq->rt_time += delta_exec;
954 if (sched_rt_runtime_exceeded(rt_rq))
955 resched_curr(rq);
956 raw_spin_unlock(&rt_rq->rt_runtime_lock);
957 }
958 }
959 }
960
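/*
 * The top-level rt_rq contributes its rt_nr_running to the rq's total
 * nr_running; these helpers add/remove that contribution when the whole
 * RT runqueue is (un)throttled or becomes (non-)empty.
 */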
961 static void
962 dequeue_top_rt_rq(struct rt_rq *rt_rq)
963 {
964 struct rq *rq = rq_of_rt_rq(rt_rq);
965
966 BUG_ON(&rq->rt != rt_rq);
967
968 if (!rt_rq->rt_queued)
969 return;
970
971 BUG_ON(!rq->nr_running);
972
973 sub_nr_running(rq, rt_rq->rt_nr_running);
974 rt_rq->rt_queued = 0;
975 }
976
977 static void
978 enqueue_top_rt_rq(struct rt_rq *rt_rq)
979 {
980 struct rq *rq = rq_of_rt_rq(rt_rq);
981
982 BUG_ON(&rq->rt != rt_rq);
983
984 if (rt_rq->rt_queued)
985 return;
986 if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
987 return;
988
989 add_nr_running(rq, rt_rq->rt_nr_running);
990 rt_rq->rt_queued = 1;
991 }
992
993 #if defined CONFIG_SMP
994
995 static void
996 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
997 {
998 struct rq *rq = rq_of_rt_rq(rt_rq);
999
1000 #ifdef CONFIG_RT_GROUP_SCHED
1001 /*
1002 * Change rq's cpupri only if rt_rq is the top queue.
1003 */
1004 if (&rq->rt != rt_rq)
1005 return;
1006 #endif
1007 if (rq->online && prio < prev_prio)
1008 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1009 }
1010
1011 static void
1012 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1013 {
1014 struct rq *rq = rq_of_rt_rq(rt_rq);
1015
1016 #ifdef CONFIG_RT_GROUP_SCHED
1017 /*
1018 * Change rq's cpupri only if rt_rq is the top queue.
1019 */
1020 if (&rq->rt != rt_rq)
1021 return;
1022 #endif
1023 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1024 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1025 }
1026
1027 #else /* CONFIG_SMP */
1028
1029 static inline
1030 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1031 static inline
1032 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1033
1034 #endif /* CONFIG_SMP */
1035
1036 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1037 static void
1038 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1039 {
1040 int prev_prio = rt_rq->highest_prio.curr;
1041
1042 if (prio < prev_prio)
1043 rt_rq->highest_prio.curr = prio;
1044
1045 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1046 }
1047
1048 static void
1049 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1050 {
1051 int prev_prio = rt_rq->highest_prio.curr;
1052
1053 if (rt_rq->rt_nr_running) {
1054
1055 WARN_ON(prio < prev_prio);
1056
1057 /*
1058 * This may have been our highest task, and therefore
1059 * we may have some recomputation to do
1060 */
1061 if (prio == prev_prio) {
1062 struct rt_prio_array *array = &rt_rq->active;
1063
1064 rt_rq->highest_prio.curr =
1065 sched_find_first_bit(array->bitmap);
1066 }
1067
1068 } else
1069 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1070
1071 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1072 }
1073
1074 #else
1075
1076 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1077 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1078
1079 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1080
1081 #ifdef CONFIG_RT_GROUP_SCHED
1082
1083 static void
1084 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1085 {
1086 if (rt_se_boosted(rt_se))
1087 rt_rq->rt_nr_boosted++;
1088
1089 if (rt_rq->tg)
1090 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1091 }
1092
1093 static void
1094 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1095 {
1096 if (rt_se_boosted(rt_se))
1097 rt_rq->rt_nr_boosted--;
1098
1099 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1100 }
1101
1102 #else /* CONFIG_RT_GROUP_SCHED */
1103
1104 static void
1105 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1106 {
1107 start_rt_bandwidth(&def_rt_bandwidth);
1108 }
1109
1110 static inline
1111 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1112
1113 #endif /* CONFIG_RT_GROUP_SCHED */
1114
1115 static inline
1116 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1117 {
1118 struct rt_rq *group_rq = group_rt_rq(rt_se);
1119
1120 if (group_rq)
1121 return group_rq->rt_nr_running;
1122 else
1123 return 1;
1124 }
1125
1126 static inline
1127 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1128 {
1129 int prio = rt_se_prio(rt_se);
1130
1131 WARN_ON(!rt_prio(prio));
1132 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1133
1134 inc_rt_prio(rt_rq, prio);
1135 inc_rt_migration(rt_se, rt_rq);
1136 inc_rt_group(rt_se, rt_rq);
1137 }
1138
1139 static inline
1140 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1141 {
1142 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1143 WARN_ON(!rt_rq->rt_nr_running);
1144 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1145
1146 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1147 dec_rt_migration(rt_se, rt_rq);
1148 dec_rt_group(rt_se, rt_rq);
1149 }
1150
1151 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1152 {
1153 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1154 struct rt_prio_array *array = &rt_rq->active;
1155 struct rt_rq *group_rq = group_rt_rq(rt_se);
1156 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1157
1158 /*
1159 * Don't enqueue the group if it's throttled, or when empty.
1160 * The latter is a consequence of the former when a child group
1161 * gets throttled and the current group doesn't have any other
1162 * active members.
1163 */
1164 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1165 return;
1166
1167 if (head)
1168 list_add(&rt_se->run_list, queue);
1169 else
1170 list_add_tail(&rt_se->run_list, queue);
1171 __set_bit(rt_se_prio(rt_se), array->bitmap);
1172
1173 inc_rt_tasks(rt_se, rt_rq);
1174 }
1175
1176 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1177 {
1178 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1179 struct rt_prio_array *array = &rt_rq->active;
1180
1181 list_del_init(&rt_se->run_list);
1182 if (list_empty(array->queue + rt_se_prio(rt_se)))
1183 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1184
1185 dec_rt_tasks(rt_se, rt_rq);
1186 }
1187
1188 /*
1189 * Because the prio of an upper entry depends on the lower
1190 * entries, we must remove entries top-down.
1191 */
1192 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1193 {
1194 struct sched_rt_entity *back = NULL;
1195
1196 for_each_sched_rt_entity(rt_se) {
1197 rt_se->back = back;
1198 back = rt_se;
1199 }
1200
1201 dequeue_top_rt_rq(rt_rq_of_se(back));
1202
1203 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1204 if (on_rt_rq(rt_se))
1205 __dequeue_rt_entity(rt_se);
1206 }
1207 }
1208
1209 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1210 {
1211 struct rq *rq = rq_of_rt_se(rt_se);
1212
1213 dequeue_rt_stack(rt_se);
1214 for_each_sched_rt_entity(rt_se)
1215 __enqueue_rt_entity(rt_se, head);
1216 enqueue_top_rt_rq(&rq->rt);
1217 }
1218
1219 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1220 {
1221 struct rq *rq = rq_of_rt_se(rt_se);
1222
1223 dequeue_rt_stack(rt_se);
1224
1225 for_each_sched_rt_entity(rt_se) {
1226 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1227
1228 if (rt_rq && rt_rq->rt_nr_running)
1229 __enqueue_rt_entity(rt_se, false);
1230 }
1231 enqueue_top_rt_rq(&rq->rt);
1232 }
1233
1234 /*
1235 * Adding/removing a task to/from a priority array:
1236 */
1237 static void
1238 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1239 {
1240 struct sched_rt_entity *rt_se = &p->rt;
1241
1242 if (flags & ENQUEUE_WAKEUP)
1243 rt_se->timeout = 0;
1244
1245 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1246
1247 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1248 enqueue_pushable_task(rq, p);
1249 }
1250
1251 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1252 {
1253 struct sched_rt_entity *rt_se = &p->rt;
1254
1255 update_curr_rt(rq);
1256 dequeue_rt_entity(rt_se);
1257
1258 dequeue_pushable_task(rq, p);
1259 }
1260
1261 /*
1262 * Put the task at the head or the end of the run list without the overhead of
1263 * dequeue followed by enqueue.
1264 */
1265 static void
1266 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1267 {
1268 if (on_rt_rq(rt_se)) {
1269 struct rt_prio_array *array = &rt_rq->active;
1270 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1271
1272 if (head)
1273 list_move(&rt_se->run_list, queue);
1274 else
1275 list_move_tail(&rt_se->run_list, queue);
1276 }
1277 }
1278
1279 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1280 {
1281 struct sched_rt_entity *rt_se = &p->rt;
1282 struct rt_rq *rt_rq;
1283
1284 for_each_sched_rt_entity(rt_se) {
1285 rt_rq = rt_rq_of_se(rt_se);
1286 requeue_rt_entity(rt_rq, rt_se, head);
1287 }
1288 }
1289
1290 static void yield_task_rt(struct rq *rq)
1291 {
1292 requeue_task_rt(rq, rq->curr, 0);
1293 }
1294
1295 #ifdef CONFIG_SMP
1296 static int find_lowest_rq(struct task_struct *task);
1297
1298 static int
1299 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1300 {
1301 struct task_struct *curr;
1302 struct rq *rq;
1303
1304 if (p->nr_cpus_allowed == 1)
1305 goto out;
1306
1307 /* For anything but wake ups, just return the task_cpu */
1308 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1309 goto out;
1310
1311 rq = cpu_rq(cpu);
1312
1313 rcu_read_lock();
1314 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1315
1316 /*
1317 * If the current task on @p's runqueue is an RT task, then
1318 * try to see if we can wake this RT task up on another
1319 * runqueue. Otherwise simply start this RT task
1320 * on its current runqueue.
1321 *
1322 * We want to avoid overloading runqueues. If the woken
1323 * task is a higher priority, then it will stay on this CPU
1324 * and the lower prio task should be moved to another CPU.
1325 * Even though this will probably make the lower prio task
1326 * lose its cache, we do not want to bounce a higher task
1327 * around just because it gave up its CPU, perhaps for a
1328 * lock?
1329 *
1330 * For equal prio tasks, we just let the scheduler sort it out.
1331 *
1332 * Otherwise, just let it ride on the affined RQ and the
1333 * post-schedule router will push the preempted task away
1334 *
1335 * This test is optimistic, if we get it wrong the load-balancer
1336 * will have to sort it out.
1337 */
1338 if (curr && unlikely(rt_task(curr)) &&
1339 (curr->nr_cpus_allowed < 2 ||
1340 curr->prio <= p->prio)) {
1341 int target = find_lowest_rq(p);
1342
1343 if (target != -1)
1344 cpu = target;
1345 }
1346 rcu_read_unlock();
1347
1348 out:
1349 return cpu;
1350 }
1351
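/*
 * A task of equal priority woke up here: if current can run elsewhere but
 * 'p' cannot, requeue 'p' at the head and resched so the push logic can
 * move current away.
 */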
1352 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1353 {
1354 if (rq->curr->nr_cpus_allowed == 1)
1355 return;
1356
1357 if (p->nr_cpus_allowed != 1
1358 && cpupri_find(&rq->rd->cpupri, p, NULL))
1359 return;
1360
1361 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1362 return;
1363
1364 /*
1365 * There appear to be other CPUs that can accept
1366 * current and none to run 'p', so let's reschedule
1367 * to try and push current away:
1368 */
1369 requeue_task_rt(rq, p, 1);
1370 resched_curr(rq);
1371 }
1372
1373 #endif /* CONFIG_SMP */
1374
1375 /*
1376 * Preempt the current task with a newly woken task if needed:
1377 */
1378 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1379 {
1380 if (p->prio < rq->curr->prio) {
1381 resched_curr(rq);
1382 return;
1383 }
1384
1385 #ifdef CONFIG_SMP
1386 /*
1387 * If:
1388 *
1389 * - the newly woken task is of equal priority to the current task
1390 * - the newly woken task is non-migratable while current is migratable
1391 * - current will be preempted on the next reschedule
1392 *
1393 * we should check to see if current can readily move to a different
1394 * cpu. If so, we will reschedule to allow the push logic to try
1395 * to move current somewhere else, making room for our non-migratable
1396 * task.
1397 */
1398 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1399 check_preempt_equal_prio(rq, p);
1400 #endif
1401 }
1402
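/*
 * Find the highest-priority non-empty queue in the priority array and
 * return its first entity.
 */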
1403 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1404 struct rt_rq *rt_rq)
1405 {
1406 struct rt_prio_array *array = &rt_rq->active;
1407 struct sched_rt_entity *next = NULL;
1408 struct list_head *queue;
1409 int idx;
1410
1411 idx = sched_find_first_bit(array->bitmap);
1412 BUG_ON(idx >= MAX_RT_PRIO);
1413
1414 queue = array->queue + idx;
1415 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1416
1417 return next;
1418 }
1419
1420 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1421 {
1422 struct sched_rt_entity *rt_se;
1423 struct task_struct *p;
1424 struct rt_rq *rt_rq = &rq->rt;
1425
1426 do {
1427 rt_se = pick_next_rt_entity(rq, rt_rq);
1428 BUG_ON(!rt_se);
1429 rt_rq = group_rt_rq(rt_se);
1430 } while (rt_rq);
1431
1432 p = rt_task_of(rt_se);
1433 p->se.exec_start = rq_clock_task(rq);
1434
1435 return p;
1436 }
1437
1438 static struct task_struct *
1439 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1440 {
1441 struct task_struct *p;
1442 struct rt_rq *rt_rq = &rq->rt;
1443
1444 if (need_pull_rt_task(rq, prev)) {
1445 pull_rt_task(rq);
1446 /*
1447 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1448 * means a dl or stop task can slip in, in which case we need
1449 * to re-start task selection.
1450 */
1451 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1452 rq->dl.dl_nr_running))
1453 return RETRY_TASK;
1454 }
1455
1456 /*
1457 * We may dequeue prev's rt_rq in put_prev_task().
1458 * So, we update the time before the rt_nr_running check.
1459 */
1460 if (prev->sched_class == &rt_sched_class)
1461 update_curr_rt(rq);
1462
1463 if (!rt_rq->rt_queued)
1464 return NULL;
1465
1466 put_prev_task(rq, prev);
1467
1468 p = _pick_next_task_rt(rq);
1469
1470 /* The running task is never eligible for pushing */
1471 dequeue_pushable_task(rq, p);
1472
1473 set_post_schedule(rq);
1474
1475 return p;
1476 }
1477
1478 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1479 {
1480 update_curr_rt(rq);
1481
1482 /*
1483 * The previous task needs to be made eligible for pushing
1484 * if it is still active
1485 */
1486 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1487 enqueue_pushable_task(rq, p);
1488 }
1489
1490 #ifdef CONFIG_SMP
1491
1492 /* Only try algorithms three times */
1493 #define RT_MAX_TRIES 3
1494
1495 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1496 {
1497 if (!task_running(rq, p) &&
1498 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1499 return 1;
1500 return 0;
1501 }
1502
1503 /*
1504 * Return the highest pushable task on the rq that is suitable to be executed
1505 * on this CPU, or NULL otherwise
1506 */
1507 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1508 {
1509 struct plist_head *head = &rq->rt.pushable_tasks;
1510 struct task_struct *p;
1511
1512 if (!has_pushable_tasks(rq))
1513 return NULL;
1514
1515 plist_for_each_entry(p, head, pushable_tasks) {
1516 if (pick_rt_task(rq, p, cpu))
1517 return p;
1518 }
1519
1520 return NULL;
1521 }
1522
1523 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1524
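/*
 * Use cpupri to build a mask of CPUs running lower-priority work than
 * 'task', then pick the best candidate: the task's last CPU, this CPU,
 * or the topologically closest one.
 */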
1525 static int find_lowest_rq(struct task_struct *task)
1526 {
1527 struct sched_domain *sd;
1528 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1529 int this_cpu = smp_processor_id();
1530 int cpu = task_cpu(task);
1531
1532 /* Make sure the mask is initialized first */
1533 if (unlikely(!lowest_mask))
1534 return -1;
1535
1536 if (task->nr_cpus_allowed == 1)
1537 return -1; /* No other targets possible */
1538
1539 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1540 return -1; /* No targets found */
1541
1542 /*
1543 * At this point we have built a mask of cpus representing the
1544 * lowest priority tasks in the system. Now we want to elect
1545 * the best one based on our affinity and topology.
1546 *
1547 * We prioritize the last cpu that the task executed on since
1548 * it is most likely cache-hot in that location.
1549 */
1550 if (cpumask_test_cpu(cpu, lowest_mask))
1551 return cpu;
1552
1553 /*
1554 * Otherwise, we consult the sched_domains span maps to figure
1555 * out which cpu is logically closest to our hot cache data.
1556 */
1557 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1558 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1559
1560 rcu_read_lock();
1561 for_each_domain(cpu, sd) {
1562 if (sd->flags & SD_WAKE_AFFINE) {
1563 int best_cpu;
1564
1565 /*
1566 * "this_cpu" is cheaper to preempt than a
1567 * remote processor.
1568 */
1569 if (this_cpu != -1 &&
1570 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1571 rcu_read_unlock();
1572 return this_cpu;
1573 }
1574
1575 best_cpu = cpumask_first_and(lowest_mask,
1576 sched_domain_span(sd));
1577 if (best_cpu < nr_cpu_ids) {
1578 rcu_read_unlock();
1579 return best_cpu;
1580 }
1581 }
1582 }
1583 rcu_read_unlock();
1584
1585 /*
1586 * And finally, if there were no matches within the domains
1587 * just give the caller *something* to work with from the compatible
1588 * locations.
1589 */
1590 if (this_cpu != -1)
1591 return this_cpu;
1592
1593 cpu = cpumask_any(lowest_mask);
1594 if (cpu < nr_cpu_ids)
1595 return cpu;
1596 return -1;
1597 }
1598
1599 /* Will lock the rq it finds */
1600 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1601 {
1602 struct rq *lowest_rq = NULL;
1603 int tries;
1604 int cpu;
1605
1606 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1607 cpu = find_lowest_rq(task);
1608
1609 if ((cpu == -1) || (cpu == rq->cpu))
1610 break;
1611
1612 lowest_rq = cpu_rq(cpu);
1613
1614 /* if the prio of this runqueue changed, try again */
1615 if (double_lock_balance(rq, lowest_rq)) {
1616 /*
1617 * We had to unlock the run queue. In
1618 * the meantime, the task could have
1619 * migrated already or had its affinity changed.
1620 * Also make sure that it wasn't scheduled on its rq.
1621 */
1622 if (unlikely(task_rq(task) != rq ||
1623 !cpumask_test_cpu(lowest_rq->cpu,
1624 tsk_cpus_allowed(task)) ||
1625 task_running(rq, task) ||
1626 !task_on_rq_queued(task))) {
1627
1628 double_unlock_balance(rq, lowest_rq);
1629 lowest_rq = NULL;
1630 break;
1631 }
1632 }
1633
1634 /* If this rq is still suitable use it. */
1635 if (lowest_rq->rt.highest_prio.curr > task->prio)
1636 break;
1637
1638 /* try again */
1639 double_unlock_balance(rq, lowest_rq);
1640 lowest_rq = NULL;
1641 }
1642
1643 return lowest_rq;
1644 }
1645
1646 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1647 {
1648 struct task_struct *p;
1649
1650 if (!has_pushable_tasks(rq))
1651 return NULL;
1652
1653 p = plist_first_entry(&rq->rt.pushable_tasks,
1654 struct task_struct, pushable_tasks);
1655
1656 BUG_ON(rq->cpu != task_cpu(p));
1657 BUG_ON(task_current(rq, p));
1658 BUG_ON(p->nr_cpus_allowed <= 1);
1659
1660 BUG_ON(!task_on_rq_queued(p));
1661 BUG_ON(!rt_task(p));
1662
1663 return p;
1664 }
1665
1666 /*
1667 * If the current CPU has more than one RT task, see if the non-running
1668 * task can migrate over to a CPU that is running a task
1669 * of lesser priority.
1670 */
1671 static int push_rt_task(struct rq *rq)
1672 {
1673 struct task_struct *next_task;
1674 struct rq *lowest_rq;
1675 int ret = 0;
1676
1677 if (!rq->rt.overloaded)
1678 return 0;
1679
1680 next_task = pick_next_pushable_task(rq);
1681 if (!next_task)
1682 return 0;
1683
1684 retry:
1685 if (unlikely(next_task == rq->curr)) {
1686 WARN_ON(1);
1687 return 0;
1688 }
1689
1690 /*
1691 * It's possible that the next_task slipped in of
1692 * higher priority than current. If that's the case
1693 * just reschedule current.
1694 */
1695 if (unlikely(next_task->prio < rq->curr->prio)) {
1696 resched_curr(rq);
1697 return 0;
1698 }
1699
1700 /* We might release rq lock */
1701 get_task_struct(next_task);
1702
1703 /* find_lock_lowest_rq locks the rq if found */
1704 lowest_rq = find_lock_lowest_rq(next_task, rq);
1705 if (!lowest_rq) {
1706 struct task_struct *task;
1707 /*
1708 * find_lock_lowest_rq releases rq->lock
1709 * so it is possible that next_task has migrated.
1710 *
1711 * We need to make sure that the task is still on the same
1712 * run-queue and is also still the next task eligible for
1713 * pushing.
1714 */
1715 task = pick_next_pushable_task(rq);
1716 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1717 /*
1718 * The task hasn't migrated, and is still the next
1719 * eligible task, but we failed to find a run-queue
1720 * to push it to. Do not retry in this case, since
1721 * other cpus will pull from us when ready.
1722 */
1723 goto out;
1724 }
1725
1726 if (!task)
1727 /* No more tasks, just exit */
1728 goto out;
1729
1730 /*
1731 * Something has shifted, try again.
1732 */
1733 put_task_struct(next_task);
1734 next_task = task;
1735 goto retry;
1736 }
1737
1738 deactivate_task(rq, next_task, 0);
1739 set_task_cpu(next_task, lowest_rq->cpu);
1740 activate_task(lowest_rq, next_task, 0);
1741 ret = 1;
1742
1743 resched_curr(lowest_rq);
1744
1745 double_unlock_balance(rq, lowest_rq);
1746
1747 out:
1748 put_task_struct(next_task);
1749
1750 return ret;
1751 }
1752
1753 static void push_rt_tasks(struct rq *rq)
1754 {
1755 /* push_rt_task() will return true if it moved an RT task */
1756 while (push_rt_task(rq))
1757 ;
1758 }
1759
1760 static int pull_rt_task(struct rq *this_rq)
1761 {
1762 int this_cpu = this_rq->cpu, ret = 0, cpu;
1763 struct task_struct *p;
1764 struct rq *src_rq;
1765
1766 if (likely(!rt_overloaded(this_rq)))
1767 return 0;
1768
1769 /*
1770 * Match the barrier from rt_set_overload(); this guarantees that if we
1771 * see overloaded we must also see the rto_mask bit.
1772 */
1773 smp_rmb();
1774
1775 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1776 if (this_cpu == cpu)
1777 continue;
1778
1779 src_rq = cpu_rq(cpu);
1780
1781 /*
1782 * Don't bother taking the src_rq->lock if the next highest
1783 * task is known to be lower-priority than our current task.
1784 * This may look racy, but if this value is about to go
1785 * logically higher, the src_rq will push this task away.
1786 * And if it's going logically lower, we do not care.
1787 */
1788 if (src_rq->rt.highest_prio.next >=
1789 this_rq->rt.highest_prio.curr)
1790 continue;
1791
1792 /*
1793 * We can potentially drop this_rq's lock in
1794 * double_lock_balance, and another CPU could
1795 * alter this_rq
1796 */
1797 double_lock_balance(this_rq, src_rq);
1798
1799 /*
1800 * We can only pull a task that is pushable
1801 * on its rq, and no others.
1802 */
1803 p = pick_highest_pushable_task(src_rq, this_cpu);
1804
1805 /*
1806 * Do we have an RT task that preempts
1807 * the to-be-scheduled task?
1808 */
1809 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1810 WARN_ON(p == src_rq->curr);
1811 WARN_ON(!task_on_rq_queued(p));
1812
1813 /*
1814 * There's a chance that p is higher in priority
1815 * than what's currently running on its cpu.
1816 * This is just that p is waking up and hasn't
1817 * had a chance to schedule. We only pull
1818 * p if it is lower in priority than the
1819 * current task on the run queue
1820 */
1821 if (p->prio < src_rq->curr->prio)
1822 goto skip;
1823
1824 ret = 1;
1825
1826 deactivate_task(src_rq, p, 0);
1827 set_task_cpu(p, this_cpu);
1828 activate_task(this_rq, p, 0);
1829 /*
1830 * We continue with the search, just in
1831 * case there's an even higher prio task
1832 * in another runqueue. (low likelihood
1833 * but possible)
1834 */
1835 }
1836 skip:
1837 double_unlock_balance(this_rq, src_rq);
1838 }
1839
1840 return ret;
1841 }
1842
1843 static void post_schedule_rt(struct rq *rq)
1844 {
1845 push_rt_tasks(rq);
1846 }
1847
1848 /*
1849 * If we are not running and we are not going to reschedule soon, we should
1850 * try to push tasks away now
1851 */
1852 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1853 {
1854 if (!task_running(rq, p) &&
1855 !test_tsk_need_resched(rq->curr) &&
1856 has_pushable_tasks(rq) &&
1857 p->nr_cpus_allowed > 1 &&
1858 (dl_task(rq->curr) || rt_task(rq->curr)) &&
1859 (rq->curr->nr_cpus_allowed < 2 ||
1860 rq->curr->prio <= p->prio))
1861 push_rt_tasks(rq);
1862 }
1863
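/*
 * Affinity changed: update the rq's count of migratable RT tasks and the
 * pushable list when the task switches between "pinned" and "migratable".
 */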
1864 static void set_cpus_allowed_rt(struct task_struct *p,
1865 const struct cpumask *new_mask)
1866 {
1867 struct rq *rq;
1868 int weight;
1869
1870 BUG_ON(!rt_task(p));
1871
1872 if (!task_on_rq_queued(p))
1873 return;
1874
1875 weight = cpumask_weight(new_mask);
1876
1877 /*
1878 * Only update if the process changes whether it
1879 * can migrate or not.
1880 */
1881 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1882 return;
1883
1884 rq = task_rq(p);
1885
1886 /*
1887 * The process used to be able to migrate OR it can now migrate
1888 */
1889 if (weight <= 1) {
1890 if (!task_current(rq, p))
1891 dequeue_pushable_task(rq, p);
1892 BUG_ON(!rq->rt.rt_nr_migratory);
1893 rq->rt.rt_nr_migratory--;
1894 } else {
1895 if (!task_current(rq, p))
1896 enqueue_pushable_task(rq, p);
1897 rq->rt.rt_nr_migratory++;
1898 }
1899
1900 update_rt_migration(&rq->rt);
1901 }
1902
1903 /* Assumes rq->lock is held */
1904 static void rq_online_rt(struct rq *rq)
1905 {
1906 if (rq->rt.overloaded)
1907 rt_set_overload(rq);
1908
1909 __enable_runtime(rq);
1910
1911 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1912 }
1913
1914 /* Assumes rq->lock is held */
1915 static void rq_offline_rt(struct rq *rq)
1916 {
1917 if (rq->rt.overloaded)
1918 rt_clear_overload(rq);
1919
1920 __disable_runtime(rq);
1921
1922 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1923 }
1924
1925 /*
1926 * When switching from the RT queue, we bring ourselves to a position
1927 * where we might want to pull RT tasks from other runqueues.
1928 */
1929 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1930 {
1931 /*
1932 * If there are other RT tasks then we will reschedule
1933 * and the scheduling of the other RT tasks will handle
1934 * the balancing. But if we are the last RT task
1935 * we may need to handle the pulling of RT tasks
1936 * now.
1937 */
1938 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
1939 return;
1940
1941 if (pull_rt_task(rq))
1942 resched_curr(rq);
1943 }
1944
1945 void __init init_sched_rt_class(void)
1946 {
1947 unsigned int i;
1948
1949 for_each_possible_cpu(i) {
1950 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1951 GFP_KERNEL, cpu_to_node(i));
1952 }
1953 }
1954 #endif /* CONFIG_SMP */
1955
1956 /*
1957 * When switching a task to RT, we may overload the runqueue
1958 * with RT tasks. In this case we try to push them off to
1959 * other runqueues.
1960 */
1961 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1962 {
1963 int check_resched = 1;
1964
1965 /*
1966 * If we are already running, then there's nothing
1967 * that needs to be done. But if we are not running
1968 * we may need to preempt the current running task.
1969 * If that current running task is also an RT task
1970 * then see if we can move to another run queue.
1971 */
1972 if (task_on_rq_queued(p) && rq->curr != p) {
1973 #ifdef CONFIG_SMP
1974 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
1975 /* Don't resched if we changed runqueues */
1976 push_rt_task(rq) && rq != task_rq(p))
1977 check_resched = 0;
1978 #endif /* CONFIG_SMP */
1979 if (check_resched && p->prio < rq->curr->prio)
1980 resched_curr(rq);
1981 }
1982 }
1983
1984 /*
1985 * Priority of the task has changed. This may cause
1986 * us to initiate a push or pull.
1987 */
1988 static void
1989 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1990 {
1991 if (!task_on_rq_queued(p))
1992 return;
1993
1994 if (rq->curr == p) {
1995 #ifdef CONFIG_SMP
1996 /*
1997 * If our priority decreases while running, we
1998 * may need to pull tasks to this runqueue.
1999 */
2000 if (oldprio < p->prio)
2001 pull_rt_task(rq);
2002 /*
2003 * If there's a higher priority task waiting to run
2004 * then reschedule. Note, the above pull_rt_task
2005 * can release the rq lock and p could migrate.
2006 * Only reschedule if p is still on the same runqueue.
2007 */
2008 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
2009 resched_curr(rq);
2010 #else
2011 /* For UP simply resched on drop of prio */
2012 if (oldprio < p->prio)
2013 resched_curr(rq);
2014 #endif /* CONFIG_SMP */
2015 } else {
2016 /*
2017 * This task is not running, but if it is
2018 * greater than the current running task
2019 * then reschedule.
2020 */
2021 if (p->prio < rq->curr->prio)
2022 resched_curr(rq);
2023 }
2024 }
2025
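/*
 * Enforce RLIMIT_RTTIME: count the ticks this task has spent running as RT
 * and, once the soft limit is reached, arm its cputime expiry so the
 * posix-cpu-timers code notices the overrun.
 */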
2026 static void watchdog(struct rq *rq, struct task_struct *p)
2027 {
2028 unsigned long soft, hard;
2029
2030 /* max may change after cur was read, this will be fixed next tick */
2031 soft = task_rlimit(p, RLIMIT_RTTIME);
2032 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2033
2034 if (soft != RLIM_INFINITY) {
2035 unsigned long next;
2036
2037 if (p->rt.watchdog_stamp != jiffies) {
2038 p->rt.timeout++;
2039 p->rt.watchdog_stamp = jiffies;
2040 }
2041
2042 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2043 if (p->rt.timeout > next)
2044 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2045 }
2046 }
2047
2048 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2049 {
2050 struct sched_rt_entity *rt_se = &p->rt;
2051
2052 update_curr_rt(rq);
2053
2054 watchdog(rq, p);
2055
2056 /*
2057 * RR tasks need a special form of timeslice management.
2058 * FIFO tasks have no timeslices.
2059 */
2060 if (p->policy != SCHED_RR)
2061 return;
2062
2063 if (--p->rt.time_slice)
2064 return;
2065
2066 p->rt.time_slice = sched_rr_timeslice;
2067
2068 /*
2069 * Requeue to the end of the queue if we (and all of our ancestors) are not
2070 * the only element on the queue
2071 */
2072 for_each_sched_rt_entity(rt_se) {
2073 if (rt_se->run_list.prev != rt_se->run_list.next) {
2074 requeue_task_rt(rq, p, 0);
2075 resched_curr(rq);
2076 return;
2077 }
2078 }
2079 }
2080
2081 static void set_curr_task_rt(struct rq *rq)
2082 {
2083 struct task_struct *p = rq->curr;
2084
2085 p->se.exec_start = rq_clock_task(rq);
2086
2087 /* The running task is never eligible for pushing */
2088 dequeue_pushable_task(rq, p);
2089 }
2090
2091 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2092 {
2093 /*
2094 * Time slice is 0 for SCHED_FIFO tasks
2095 */
2096 if (task->policy == SCHED_RR)
2097 return sched_rr_timeslice;
2098 else
2099 return 0;
2100 }
2101
2102 const struct sched_class rt_sched_class = {
2103 .next = &fair_sched_class,
2104 .enqueue_task = enqueue_task_rt,
2105 .dequeue_task = dequeue_task_rt,
2106 .yield_task = yield_task_rt,
2107
2108 .check_preempt_curr = check_preempt_curr_rt,
2109
2110 .pick_next_task = pick_next_task_rt,
2111 .put_prev_task = put_prev_task_rt,
2112
2113 #ifdef CONFIG_SMP
2114 .select_task_rq = select_task_rq_rt,
2115
2116 .set_cpus_allowed = set_cpus_allowed_rt,
2117 .rq_online = rq_online_rt,
2118 .rq_offline = rq_offline_rt,
2119 .post_schedule = post_schedule_rt,
2120 .task_woken = task_woken_rt,
2121 .switched_from = switched_from_rt,
2122 #endif
2123
2124 .set_curr_task = set_curr_task_rt,
2125 .task_tick = task_tick_rt,
2126
2127 .get_rr_interval = get_rr_interval_rt,
2128
2129 .prio_changed = prio_changed_rt,
2130 .switched_to = switched_to_rt,
2131
2132 .update_curr = update_curr_rt,
2133 };
2134
2135 #ifdef CONFIG_SCHED_DEBUG
2136 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2137
2138 void print_rt_stats(struct seq_file *m, int cpu)
2139 {
2140 rt_rq_iter_t iter;
2141 struct rt_rq *rt_rq;
2142
2143 rcu_read_lock();
2144 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2145 print_rt_rq(m, cpu, rt_rq);
2146 rcu_read_unlock();
2147 }
2148 #endif /* CONFIG_SCHED_DEBUG */