]> git.ipfire.org Git - thirdparty/linux.git/blob - kernel/sched/fair.c
Merge tag 'mm-stable-2023-04-27-15-30' of git://git.kernel.org/pub/scm/linux/kernel...
[thirdparty/linux.git] / kernel / sched / fair.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
4 *
5 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6 *
7 * Interactivity improvements by Mike Galbraith
8 * (C) 2007 Mike Galbraith <efault@gmx.de>
9 *
10 * Various enhancements by Dmitry Adamushko.
11 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
12 *
13 * Group scheduling enhancements by Srivatsa Vaddagiri
14 * Copyright IBM Corporation, 2007
15 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
16 *
17 * Scaled math optimizations by Thomas Gleixner
18 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
19 *
20 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
21 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
22 */
23 #include <linux/energy_model.h>
24 #include <linux/mmap_lock.h>
25 #include <linux/hugetlb_inline.h>
26 #include <linux/jiffies.h>
27 #include <linux/mm_api.h>
28 #include <linux/highmem.h>
29 #include <linux/spinlock_api.h>
30 #include <linux/cpumask_api.h>
31 #include <linux/lockdep_api.h>
32 #include <linux/softirq.h>
33 #include <linux/refcount_api.h>
34 #include <linux/topology.h>
35 #include <linux/sched/clock.h>
36 #include <linux/sched/cond_resched.h>
37 #include <linux/sched/cputime.h>
38 #include <linux/sched/isolation.h>
39 #include <linux/sched/nohz.h>
40
41 #include <linux/cpuidle.h>
42 #include <linux/interrupt.h>
43 #include <linux/memory-tiers.h>
44 #include <linux/mempolicy.h>
45 #include <linux/mutex_api.h>
46 #include <linux/profile.h>
47 #include <linux/psi.h>
48 #include <linux/ratelimit.h>
49 #include <linux/task_work.h>
50
51 #include <asm/switch_to.h>
52
53 #include <linux/sched/cond_resched.h>
54
55 #include "sched.h"
56 #include "stats.h"
57 #include "autogroup.h"
58
59 /*
60 * Targeted preemption latency for CPU-bound tasks:
61 *
62 * NOTE: this latency value is not the same as the concept of
63 * 'timeslice length' - timeslices in CFS are of variable length
64 * and have no persistent notion like in traditional, time-slice
65 * based scheduling concepts.
66 *
67 * (to see the precise effective timeslice length of your workload,
68 * run vmstat and monitor the context-switches (cs) field)
69 *
70 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
71 */
72 unsigned int sysctl_sched_latency = 6000000ULL;
73 static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
74
75 /*
76 * The initial- and re-scaling of tunables is configurable
77 *
78 * Options are:
79 *
80 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
81 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
82 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
83 *
84 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
85 */
86 unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
87
88 /*
89 * Minimal preemption granularity for CPU-bound tasks:
90 *
91 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
92 */
93 unsigned int sysctl_sched_min_granularity = 750000ULL;
94 static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
95
96 /*
97 * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
98 * Applies only when SCHED_IDLE tasks compete with normal tasks.
99 *
100 * (default: 0.75 msec)
101 */
102 unsigned int sysctl_sched_idle_min_granularity = 750000ULL;
103
104 /*
105 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
106 */
107 static unsigned int sched_nr_latency = 8;
108
109 /*
110 * After fork, child runs first. If set to 0 (default) then
111 * parent will (try to) run first.
112 */
113 unsigned int sysctl_sched_child_runs_first __read_mostly;
114
115 /*
116 * SCHED_OTHER wake-up granularity.
117 *
118 * This option delays the preemption effects of decoupled workloads
119 * and reduces their over-scheduling. Synchronous workloads will still
120 * have immediate wakeup/sleep latencies.
121 *
122 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
123 */
124 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
125 static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
126
127 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
128
129 int sched_thermal_decay_shift;
130 static int __init setup_sched_thermal_decay_shift(char *str)
131 {
132 int _shift = 0;
133
134 if (kstrtoint(str, 0, &_shift))
135 pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");
136
137 sched_thermal_decay_shift = clamp(_shift, 0, 10);
138 return 1;
139 }
140 __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
141
142 #ifdef CONFIG_SMP
143 /*
144 * For asym packing, by default the lower numbered CPU has higher priority.
145 */
146 int __weak arch_asym_cpu_priority(int cpu)
147 {
148 return -cpu;
149 }
150
151 /*
152 * The margin used when comparing utilization with CPU capacity.
153 *
154 * (default: ~20%)
155 */
156 #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
157
158 /*
159 * The margin used when comparing CPU capacities.
160 * is 'cap1' noticeably greater than 'cap2'
161 *
162 * (default: ~5%)
163 */
164 #define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
165 #endif
166
167 #ifdef CONFIG_CFS_BANDWIDTH
168 /*
169 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
170 * each time a cfs_rq requests quota.
171 *
172 * Note: in the case that the slice exceeds the runtime remaining (either due
173 * to consumption or the quota being specified to be smaller than the slice)
174 * we will always only issue the remaining available time.
175 *
176 * (default: 5 msec, units: microseconds)
177 */
178 static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
179 #endif
180
181 #ifdef CONFIG_NUMA_BALANCING
182 /* Restrict the NUMA promotion throughput (MB/s) for each target node. */
183 static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
184 #endif
185
186 #ifdef CONFIG_SYSCTL
187 static struct ctl_table sched_fair_sysctls[] = {
188 {
189 .procname = "sched_child_runs_first",
190 .data = &sysctl_sched_child_runs_first,
191 .maxlen = sizeof(unsigned int),
192 .mode = 0644,
193 .proc_handler = proc_dointvec,
194 },
195 #ifdef CONFIG_CFS_BANDWIDTH
196 {
197 .procname = "sched_cfs_bandwidth_slice_us",
198 .data = &sysctl_sched_cfs_bandwidth_slice,
199 .maxlen = sizeof(unsigned int),
200 .mode = 0644,
201 .proc_handler = proc_dointvec_minmax,
202 .extra1 = SYSCTL_ONE,
203 },
204 #endif
205 #ifdef CONFIG_NUMA_BALANCING
206 {
207 .procname = "numa_balancing_promote_rate_limit_MBps",
208 .data = &sysctl_numa_balancing_promote_rate_limit,
209 .maxlen = sizeof(unsigned int),
210 .mode = 0644,
211 .proc_handler = proc_dointvec_minmax,
212 .extra1 = SYSCTL_ZERO,
213 },
214 #endif /* CONFIG_NUMA_BALANCING */
215 {}
216 };
217
218 static int __init sched_fair_sysctl_init(void)
219 {
220 register_sysctl_init("kernel", sched_fair_sysctls);
221 return 0;
222 }
223 late_initcall(sched_fair_sysctl_init);
224 #endif
225
226 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
227 {
228 lw->weight += inc;
229 lw->inv_weight = 0;
230 }
231
232 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
233 {
234 lw->weight -= dec;
235 lw->inv_weight = 0;
236 }
237
238 static inline void update_load_set(struct load_weight *lw, unsigned long w)
239 {
240 lw->weight = w;
241 lw->inv_weight = 0;
242 }
243
244 /*
245 * Increase the granularity value when there are more CPUs,
246 * because with more CPUs the 'effective latency' as visible
247 * to users decreases. But the relationship is not linear,
248 * so pick a second-best guess by going with the log2 of the
249 * number of CPUs.
250 *
251 * This idea comes from the SD scheduler of Con Kolivas:
252 */
253 static unsigned int get_update_sysctl_factor(void)
254 {
255 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
256 unsigned int factor;
257
258 switch (sysctl_sched_tunable_scaling) {
259 case SCHED_TUNABLESCALING_NONE:
260 factor = 1;
261 break;
262 case SCHED_TUNABLESCALING_LINEAR:
263 factor = cpus;
264 break;
265 case SCHED_TUNABLESCALING_LOG:
266 default:
267 factor = 1 + ilog2(cpus);
268 break;
269 }
270
271 return factor;
272 }
273
274 static void update_sysctl(void)
275 {
276 unsigned int factor = get_update_sysctl_factor();
277
278 #define SET_SYSCTL(name) \
279 (sysctl_##name = (factor) * normalized_sysctl_##name)
280 SET_SYSCTL(sched_min_granularity);
281 SET_SYSCTL(sched_latency);
282 SET_SYSCTL(sched_wakeup_granularity);
283 #undef SET_SYSCTL
284 }
285
286 void __init sched_init_granularity(void)
287 {
288 update_sysctl();
289 }
290
291 #define WMULT_CONST (~0U)
292 #define WMULT_SHIFT 32
293
294 static void __update_inv_weight(struct load_weight *lw)
295 {
296 unsigned long w;
297
298 if (likely(lw->inv_weight))
299 return;
300
301 w = scale_load_down(lw->weight);
302
303 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
304 lw->inv_weight = 1;
305 else if (unlikely(!w))
306 lw->inv_weight = WMULT_CONST;
307 else
308 lw->inv_weight = WMULT_CONST / w;
309 }
310
311 /*
312 * delta_exec * weight / lw.weight
313 * OR
314 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
315 *
316 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
317 * we're guaranteed shift stays positive because inv_weight is guaranteed to
318 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
319 *
320 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
321 * weight/lw.weight <= 1, and therefore our shift will also be positive.
322 */
323 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
324 {
325 u64 fact = scale_load_down(weight);
326 u32 fact_hi = (u32)(fact >> 32);
327 int shift = WMULT_SHIFT;
328 int fs;
329
330 __update_inv_weight(lw);
331
332 if (unlikely(fact_hi)) {
333 fs = fls(fact_hi);
334 shift -= fs;
335 fact >>= fs;
336 }
337
338 fact = mul_u32_u32(fact, lw->inv_weight);
339
340 fact_hi = (u32)(fact >> 32);
341 if (fact_hi) {
342 fs = fls(fact_hi);
343 shift -= fs;
344 fact >>= fs;
345 }
346
347 return mul_u64_u32_shr(delta_exec, fact, shift);
348 }
349
350
351 const struct sched_class fair_sched_class;
352
353 /**************************************************************
354 * CFS operations on generic schedulable entities:
355 */
356
357 #ifdef CONFIG_FAIR_GROUP_SCHED
358
359 /* Walk up scheduling entities hierarchy */
360 #define for_each_sched_entity(se) \
361 for (; se; se = se->parent)
362
363 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
364 {
365 struct rq *rq = rq_of(cfs_rq);
366 int cpu = cpu_of(rq);
367
368 if (cfs_rq->on_list)
369 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
370
371 cfs_rq->on_list = 1;
372
373 /*
374 * Ensure we either appear before our parent (if already
375 * enqueued) or force our parent to appear after us when it is
376 * enqueued. The fact that we always enqueue bottom-up
377 * reduces this to two cases and a special case for the root
378 * cfs_rq. Furthermore, it also means that we will always reset
379 * tmp_alone_branch either when the branch is connected
380 * to a tree or when we reach the top of the tree
381 */
382 if (cfs_rq->tg->parent &&
383 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
384 /*
385 * If parent is already on the list, we add the child
386 * just before. Thanks to circular linked property of
387 * the list, this means to put the child at the tail
388 * of the list that starts by parent.
389 */
390 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
391 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
392 /*
393 * The branch is now connected to its tree so we can
394 * reset tmp_alone_branch to the beginning of the
395 * list.
396 */
397 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
398 return true;
399 }
400
401 if (!cfs_rq->tg->parent) {
402 /*
403 * cfs rq without parent should be put
404 * at the tail of the list.
405 */
406 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
407 &rq->leaf_cfs_rq_list);
408 /*
409 * We have reach the top of a tree so we can reset
410 * tmp_alone_branch to the beginning of the list.
411 */
412 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
413 return true;
414 }
415
416 /*
417 * The parent has not already been added so we want to
418 * make sure that it will be put after us.
419 * tmp_alone_branch points to the begin of the branch
420 * where we will add parent.
421 */
422 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
423 /*
424 * update tmp_alone_branch to points to the new begin
425 * of the branch
426 */
427 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
428 return false;
429 }
430
431 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
432 {
433 if (cfs_rq->on_list) {
434 struct rq *rq = rq_of(cfs_rq);
435
436 /*
437 * With cfs_rq being unthrottled/throttled during an enqueue,
438 * it can happen the tmp_alone_branch points the a leaf that
439 * we finally want to del. In this case, tmp_alone_branch moves
440 * to the prev element but it will point to rq->leaf_cfs_rq_list
441 * at the end of the enqueue.
442 */
443 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
444 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
445
446 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
447 cfs_rq->on_list = 0;
448 }
449 }
450
451 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
452 {
453 SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
454 }
455
456 /* Iterate thr' all leaf cfs_rq's on a runqueue */
457 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
458 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
459 leaf_cfs_rq_list)
460
461 /* Do the two (enqueued) entities belong to the same group ? */
462 static inline struct cfs_rq *
463 is_same_group(struct sched_entity *se, struct sched_entity *pse)
464 {
465 if (se->cfs_rq == pse->cfs_rq)
466 return se->cfs_rq;
467
468 return NULL;
469 }
470
471 static inline struct sched_entity *parent_entity(const struct sched_entity *se)
472 {
473 return se->parent;
474 }
475
476 static void
477 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
478 {
479 int se_depth, pse_depth;
480
481 /*
482 * preemption test can be made between sibling entities who are in the
483 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
484 * both tasks until we find their ancestors who are siblings of common
485 * parent.
486 */
487
488 /* First walk up until both entities are at same depth */
489 se_depth = (*se)->depth;
490 pse_depth = (*pse)->depth;
491
492 while (se_depth > pse_depth) {
493 se_depth--;
494 *se = parent_entity(*se);
495 }
496
497 while (pse_depth > se_depth) {
498 pse_depth--;
499 *pse = parent_entity(*pse);
500 }
501
502 while (!is_same_group(*se, *pse)) {
503 *se = parent_entity(*se);
504 *pse = parent_entity(*pse);
505 }
506 }
507
508 static int tg_is_idle(struct task_group *tg)
509 {
510 return tg->idle > 0;
511 }
512
513 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
514 {
515 return cfs_rq->idle > 0;
516 }
517
518 static int se_is_idle(struct sched_entity *se)
519 {
520 if (entity_is_task(se))
521 return task_has_idle_policy(task_of(se));
522 return cfs_rq_is_idle(group_cfs_rq(se));
523 }
524
525 #else /* !CONFIG_FAIR_GROUP_SCHED */
526
527 #define for_each_sched_entity(se) \
528 for (; se; se = NULL)
529
530 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
531 {
532 return true;
533 }
534
535 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
536 {
537 }
538
539 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
540 {
541 }
542
543 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
544 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
545
546 static inline struct sched_entity *parent_entity(struct sched_entity *se)
547 {
548 return NULL;
549 }
550
551 static inline void
552 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
553 {
554 }
555
556 static inline int tg_is_idle(struct task_group *tg)
557 {
558 return 0;
559 }
560
561 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
562 {
563 return 0;
564 }
565
566 static int se_is_idle(struct sched_entity *se)
567 {
568 return 0;
569 }
570
571 #endif /* CONFIG_FAIR_GROUP_SCHED */
572
573 static __always_inline
574 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
575
576 /**************************************************************
577 * Scheduling class tree data structure manipulation methods:
578 */
579
580 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
581 {
582 s64 delta = (s64)(vruntime - max_vruntime);
583 if (delta > 0)
584 max_vruntime = vruntime;
585
586 return max_vruntime;
587 }
588
589 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
590 {
591 s64 delta = (s64)(vruntime - min_vruntime);
592 if (delta < 0)
593 min_vruntime = vruntime;
594
595 return min_vruntime;
596 }
597
598 static inline bool entity_before(const struct sched_entity *a,
599 const struct sched_entity *b)
600 {
601 return (s64)(a->vruntime - b->vruntime) < 0;
602 }
603
604 #define __node_2_se(node) \
605 rb_entry((node), struct sched_entity, run_node)
606
607 static void update_min_vruntime(struct cfs_rq *cfs_rq)
608 {
609 struct sched_entity *curr = cfs_rq->curr;
610 struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
611
612 u64 vruntime = cfs_rq->min_vruntime;
613
614 if (curr) {
615 if (curr->on_rq)
616 vruntime = curr->vruntime;
617 else
618 curr = NULL;
619 }
620
621 if (leftmost) { /* non-empty tree */
622 struct sched_entity *se = __node_2_se(leftmost);
623
624 if (!curr)
625 vruntime = se->vruntime;
626 else
627 vruntime = min_vruntime(vruntime, se->vruntime);
628 }
629
630 /* ensure we never gain time by being placed backwards. */
631 u64_u32_store(cfs_rq->min_vruntime,
632 max_vruntime(cfs_rq->min_vruntime, vruntime));
633 }
634
635 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
636 {
637 return entity_before(__node_2_se(a), __node_2_se(b));
638 }
639
640 /*
641 * Enqueue an entity into the rb-tree:
642 */
643 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
644 {
645 rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
646 }
647
648 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
649 {
650 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
651 }
652
653 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
654 {
655 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
656
657 if (!left)
658 return NULL;
659
660 return __node_2_se(left);
661 }
662
663 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
664 {
665 struct rb_node *next = rb_next(&se->run_node);
666
667 if (!next)
668 return NULL;
669
670 return __node_2_se(next);
671 }
672
673 #ifdef CONFIG_SCHED_DEBUG
674 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
675 {
676 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
677
678 if (!last)
679 return NULL;
680
681 return __node_2_se(last);
682 }
683
684 /**************************************************************
685 * Scheduling class statistics methods:
686 */
687
688 int sched_update_scaling(void)
689 {
690 unsigned int factor = get_update_sysctl_factor();
691
692 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
693 sysctl_sched_min_granularity);
694
695 #define WRT_SYSCTL(name) \
696 (normalized_sysctl_##name = sysctl_##name / (factor))
697 WRT_SYSCTL(sched_min_granularity);
698 WRT_SYSCTL(sched_latency);
699 WRT_SYSCTL(sched_wakeup_granularity);
700 #undef WRT_SYSCTL
701
702 return 0;
703 }
704 #endif
705
706 /*
707 * delta /= w
708 */
709 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
710 {
711 if (unlikely(se->load.weight != NICE_0_LOAD))
712 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
713
714 return delta;
715 }
716
717 /*
718 * The idea is to set a period in which each task runs once.
719 *
720 * When there are too many tasks (sched_nr_latency) we have to stretch
721 * this period because otherwise the slices get too small.
722 *
723 * p = (nr <= nl) ? l : l*nr/nl
724 */
725 static u64 __sched_period(unsigned long nr_running)
726 {
727 if (unlikely(nr_running > sched_nr_latency))
728 return nr_running * sysctl_sched_min_granularity;
729 else
730 return sysctl_sched_latency;
731 }
732
733 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);
734
735 /*
736 * We calculate the wall-time slice from the period by taking a part
737 * proportional to the weight.
738 *
739 * s = p*P[w/rw]
740 */
741 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
742 {
743 unsigned int nr_running = cfs_rq->nr_running;
744 struct sched_entity *init_se = se;
745 unsigned int min_gran;
746 u64 slice;
747
748 if (sched_feat(ALT_PERIOD))
749 nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
750
751 slice = __sched_period(nr_running + !se->on_rq);
752
753 for_each_sched_entity(se) {
754 struct load_weight *load;
755 struct load_weight lw;
756 struct cfs_rq *qcfs_rq;
757
758 qcfs_rq = cfs_rq_of(se);
759 load = &qcfs_rq->load;
760
761 if (unlikely(!se->on_rq)) {
762 lw = qcfs_rq->load;
763
764 update_load_add(&lw, se->load.weight);
765 load = &lw;
766 }
767 slice = __calc_delta(slice, se->load.weight, load);
768 }
769
770 if (sched_feat(BASE_SLICE)) {
771 if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq))
772 min_gran = sysctl_sched_idle_min_granularity;
773 else
774 min_gran = sysctl_sched_min_granularity;
775
776 slice = max_t(u64, slice, min_gran);
777 }
778
779 return slice;
780 }
781
782 /*
783 * We calculate the vruntime slice of a to-be-inserted task.
784 *
785 * vs = s/w
786 */
787 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
788 {
789 return calc_delta_fair(sched_slice(cfs_rq, se), se);
790 }
791
792 #include "pelt.h"
793 #ifdef CONFIG_SMP
794
795 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
796 static unsigned long task_h_load(struct task_struct *p);
797 static unsigned long capacity_of(int cpu);
798
799 /* Give new sched_entity start runnable values to heavy its load in infant time */
800 void init_entity_runnable_average(struct sched_entity *se)
801 {
802 struct sched_avg *sa = &se->avg;
803
804 memset(sa, 0, sizeof(*sa));
805
806 /*
807 * Tasks are initialized with full load to be seen as heavy tasks until
808 * they get a chance to stabilize to their real load level.
809 * Group entities are initialized with zero load to reflect the fact that
810 * nothing has been attached to the task group yet.
811 */
812 if (entity_is_task(se))
813 sa->load_avg = scale_load_down(se->load.weight);
814
815 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
816 }
817
818 /*
819 * With new tasks being created, their initial util_avgs are extrapolated
820 * based on the cfs_rq's current util_avg:
821 *
822 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
823 *
824 * However, in many cases, the above util_avg does not give a desired
825 * value. Moreover, the sum of the util_avgs may be divergent, such
826 * as when the series is a harmonic series.
827 *
828 * To solve this problem, we also cap the util_avg of successive tasks to
829 * only 1/2 of the left utilization budget:
830 *
831 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
832 *
833 * where n denotes the nth task and cpu_scale the CPU capacity.
834 *
835 * For example, for a CPU with 1024 of capacity, a simplest series from
836 * the beginning would be like:
837 *
838 * task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
839 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
840 *
841 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
842 * if util_avg > util_avg_cap.
843 */
844 void post_init_entity_util_avg(struct task_struct *p)
845 {
846 struct sched_entity *se = &p->se;
847 struct cfs_rq *cfs_rq = cfs_rq_of(se);
848 struct sched_avg *sa = &se->avg;
849 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
850 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
851
852 if (p->sched_class != &fair_sched_class) {
853 /*
854 * For !fair tasks do:
855 *
856 update_cfs_rq_load_avg(now, cfs_rq);
857 attach_entity_load_avg(cfs_rq, se);
858 switched_from_fair(rq, p);
859 *
860 * such that the next switched_to_fair() has the
861 * expected state.
862 */
863 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
864 return;
865 }
866
867 if (cap > 0) {
868 if (cfs_rq->avg.util_avg != 0) {
869 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
870 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
871
872 if (sa->util_avg > cap)
873 sa->util_avg = cap;
874 } else {
875 sa->util_avg = cap;
876 }
877 }
878
879 sa->runnable_avg = sa->util_avg;
880 }
881
882 #else /* !CONFIG_SMP */
883 void init_entity_runnable_average(struct sched_entity *se)
884 {
885 }
886 void post_init_entity_util_avg(struct task_struct *p)
887 {
888 }
889 static void update_tg_load_avg(struct cfs_rq *cfs_rq)
890 {
891 }
892 #endif /* CONFIG_SMP */
893
894 /*
895 * Update the current task's runtime statistics.
896 */
897 static void update_curr(struct cfs_rq *cfs_rq)
898 {
899 struct sched_entity *curr = cfs_rq->curr;
900 u64 now = rq_clock_task(rq_of(cfs_rq));
901 u64 delta_exec;
902
903 if (unlikely(!curr))
904 return;
905
906 delta_exec = now - curr->exec_start;
907 if (unlikely((s64)delta_exec <= 0))
908 return;
909
910 curr->exec_start = now;
911
912 if (schedstat_enabled()) {
913 struct sched_statistics *stats;
914
915 stats = __schedstats_from_se(curr);
916 __schedstat_set(stats->exec_max,
917 max(delta_exec, stats->exec_max));
918 }
919
920 curr->sum_exec_runtime += delta_exec;
921 schedstat_add(cfs_rq->exec_clock, delta_exec);
922
923 curr->vruntime += calc_delta_fair(delta_exec, curr);
924 update_min_vruntime(cfs_rq);
925
926 if (entity_is_task(curr)) {
927 struct task_struct *curtask = task_of(curr);
928
929 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
930 cgroup_account_cputime(curtask, delta_exec);
931 account_group_exec_runtime(curtask, delta_exec);
932 }
933
934 account_cfs_rq_runtime(cfs_rq, delta_exec);
935 }
936
937 static void update_curr_fair(struct rq *rq)
938 {
939 update_curr(cfs_rq_of(&rq->curr->se));
940 }
941
942 static inline void
943 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
944 {
945 struct sched_statistics *stats;
946 struct task_struct *p = NULL;
947
948 if (!schedstat_enabled())
949 return;
950
951 stats = __schedstats_from_se(se);
952
953 if (entity_is_task(se))
954 p = task_of(se);
955
956 __update_stats_wait_start(rq_of(cfs_rq), p, stats);
957 }
958
959 static inline void
960 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
961 {
962 struct sched_statistics *stats;
963 struct task_struct *p = NULL;
964
965 if (!schedstat_enabled())
966 return;
967
968 stats = __schedstats_from_se(se);
969
970 /*
971 * When the sched_schedstat changes from 0 to 1, some sched se
972 * maybe already in the runqueue, the se->statistics.wait_start
973 * will be 0.So it will let the delta wrong. We need to avoid this
974 * scenario.
975 */
976 if (unlikely(!schedstat_val(stats->wait_start)))
977 return;
978
979 if (entity_is_task(se))
980 p = task_of(se);
981
982 __update_stats_wait_end(rq_of(cfs_rq), p, stats);
983 }
984
985 static inline void
986 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
987 {
988 struct sched_statistics *stats;
989 struct task_struct *tsk = NULL;
990
991 if (!schedstat_enabled())
992 return;
993
994 stats = __schedstats_from_se(se);
995
996 if (entity_is_task(se))
997 tsk = task_of(se);
998
999 __update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats);
1000 }
1001
1002 /*
1003 * Task is being enqueued - update stats:
1004 */
1005 static inline void
1006 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1007 {
1008 if (!schedstat_enabled())
1009 return;
1010
1011 /*
1012 * Are we enqueueing a waiting task? (for current tasks
1013 * a dequeue/enqueue event is a NOP)
1014 */
1015 if (se != cfs_rq->curr)
1016 update_stats_wait_start_fair(cfs_rq, se);
1017
1018 if (flags & ENQUEUE_WAKEUP)
1019 update_stats_enqueue_sleeper_fair(cfs_rq, se);
1020 }
1021
1022 static inline void
1023 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1024 {
1025
1026 if (!schedstat_enabled())
1027 return;
1028
1029 /*
1030 * Mark the end of the wait period if dequeueing a
1031 * waiting task:
1032 */
1033 if (se != cfs_rq->curr)
1034 update_stats_wait_end_fair(cfs_rq, se);
1035
1036 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1037 struct task_struct *tsk = task_of(se);
1038 unsigned int state;
1039
1040 /* XXX racy against TTWU */
1041 state = READ_ONCE(tsk->__state);
1042 if (state & TASK_INTERRUPTIBLE)
1043 __schedstat_set(tsk->stats.sleep_start,
1044 rq_clock(rq_of(cfs_rq)));
1045 if (state & TASK_UNINTERRUPTIBLE)
1046 __schedstat_set(tsk->stats.block_start,
1047 rq_clock(rq_of(cfs_rq)));
1048 }
1049 }
1050
1051 /*
1052 * We are picking a new current task - update its stats:
1053 */
1054 static inline void
1055 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1056 {
1057 /*
1058 * We are starting a new run period:
1059 */
1060 se->exec_start = rq_clock_task(rq_of(cfs_rq));
1061 }
1062
1063 /**************************************************
1064 * Scheduling class queueing methods:
1065 */
1066
1067 #ifdef CONFIG_NUMA
1068 #define NUMA_IMBALANCE_MIN 2
1069
1070 static inline long
1071 adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
1072 {
1073 /*
1074 * Allow a NUMA imbalance if busy CPUs is less than the maximum
1075 * threshold. Above this threshold, individual tasks may be contending
1076 * for both memory bandwidth and any shared HT resources. This is an
1077 * approximation as the number of running tasks may not be related to
1078 * the number of busy CPUs due to sched_setaffinity.
1079 */
1080 if (dst_running > imb_numa_nr)
1081 return imbalance;
1082
1083 /*
1084 * Allow a small imbalance based on a simple pair of communicating
1085 * tasks that remain local when the destination is lightly loaded.
1086 */
1087 if (imbalance <= NUMA_IMBALANCE_MIN)
1088 return 0;
1089
1090 return imbalance;
1091 }
1092 #endif /* CONFIG_NUMA */
1093
1094 #ifdef CONFIG_NUMA_BALANCING
1095 /*
1096 * Approximate time to scan a full NUMA task in ms. The task scan period is
1097 * calculated based on the tasks virtual memory size and
1098 * numa_balancing_scan_size.
1099 */
1100 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
1101 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
1102
1103 /* Portion of address space to scan in MB */
1104 unsigned int sysctl_numa_balancing_scan_size = 256;
1105
1106 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1107 unsigned int sysctl_numa_balancing_scan_delay = 1000;
1108
1109 /* The page with hint page fault latency < threshold in ms is considered hot */
1110 unsigned int sysctl_numa_balancing_hot_threshold = MSEC_PER_SEC;
1111
1112 struct numa_group {
1113 refcount_t refcount;
1114
1115 spinlock_t lock; /* nr_tasks, tasks */
1116 int nr_tasks;
1117 pid_t gid;
1118 int active_nodes;
1119
1120 struct rcu_head rcu;
1121 unsigned long total_faults;
1122 unsigned long max_faults_cpu;
1123 /*
1124 * faults[] array is split into two regions: faults_mem and faults_cpu.
1125 *
1126 * Faults_cpu is used to decide whether memory should move
1127 * towards the CPU. As a consequence, these stats are weighted
1128 * more by CPU use than by memory faults.
1129 */
1130 unsigned long faults[];
1131 };
1132
1133 /*
1134 * For functions that can be called in multiple contexts that permit reading
1135 * ->numa_group (see struct task_struct for locking rules).
1136 */
1137 static struct numa_group *deref_task_numa_group(struct task_struct *p)
1138 {
1139 return rcu_dereference_check(p->numa_group, p == current ||
1140 (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
1141 }
1142
1143 static struct numa_group *deref_curr_numa_group(struct task_struct *p)
1144 {
1145 return rcu_dereference_protected(p->numa_group, p == current);
1146 }
1147
1148 static inline unsigned long group_faults_priv(struct numa_group *ng);
1149 static inline unsigned long group_faults_shared(struct numa_group *ng);
1150
1151 static unsigned int task_nr_scan_windows(struct task_struct *p)
1152 {
1153 unsigned long rss = 0;
1154 unsigned long nr_scan_pages;
1155
1156 /*
1157 * Calculations based on RSS as non-present and empty pages are skipped
1158 * by the PTE scanner and NUMA hinting faults should be trapped based
1159 * on resident pages
1160 */
1161 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1162 rss = get_mm_rss(p->mm);
1163 if (!rss)
1164 rss = nr_scan_pages;
1165
1166 rss = round_up(rss, nr_scan_pages);
1167 return rss / nr_scan_pages;
1168 }
1169
1170 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1171 #define MAX_SCAN_WINDOW 2560
1172
1173 static unsigned int task_scan_min(struct task_struct *p)
1174 {
1175 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
1176 unsigned int scan, floor;
1177 unsigned int windows = 1;
1178
1179 if (scan_size < MAX_SCAN_WINDOW)
1180 windows = MAX_SCAN_WINDOW / scan_size;
1181 floor = 1000 / windows;
1182
1183 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
1184 return max_t(unsigned int, floor, scan);
1185 }
1186
1187 static unsigned int task_scan_start(struct task_struct *p)
1188 {
1189 unsigned long smin = task_scan_min(p);
1190 unsigned long period = smin;
1191 struct numa_group *ng;
1192
1193 /* Scale the maximum scan period with the amount of shared memory. */
1194 rcu_read_lock();
1195 ng = rcu_dereference(p->numa_group);
1196 if (ng) {
1197 unsigned long shared = group_faults_shared(ng);
1198 unsigned long private = group_faults_priv(ng);
1199
1200 period *= refcount_read(&ng->refcount);
1201 period *= shared + 1;
1202 period /= private + shared + 1;
1203 }
1204 rcu_read_unlock();
1205
1206 return max(smin, period);
1207 }
1208
1209 static unsigned int task_scan_max(struct task_struct *p)
1210 {
1211 unsigned long smin = task_scan_min(p);
1212 unsigned long smax;
1213 struct numa_group *ng;
1214
1215 /* Watch for min being lower than max due to floor calculations */
1216 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
1217
1218 /* Scale the maximum scan period with the amount of shared memory. */
1219 ng = deref_curr_numa_group(p);
1220 if (ng) {
1221 unsigned long shared = group_faults_shared(ng);
1222 unsigned long private = group_faults_priv(ng);
1223 unsigned long period = smax;
1224
1225 period *= refcount_read(&ng->refcount);
1226 period *= shared + 1;
1227 period /= private + shared + 1;
1228
1229 smax = max(smax, period);
1230 }
1231
1232 return max(smin, smax);
1233 }
1234
1235 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1236 {
1237 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
1238 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1239 }
1240
1241 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1242 {
1243 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
1244 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1245 }
1246
1247 /* Shared or private faults. */
1248 #define NR_NUMA_HINT_FAULT_TYPES 2
1249
1250 /* Memory and CPU locality */
1251 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1252
1253 /* Averaged statistics, and temporary buffers. */
1254 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1255
1256 pid_t task_numa_group_id(struct task_struct *p)
1257 {
1258 struct numa_group *ng;
1259 pid_t gid = 0;
1260
1261 rcu_read_lock();
1262 ng = rcu_dereference(p->numa_group);
1263 if (ng)
1264 gid = ng->gid;
1265 rcu_read_unlock();
1266
1267 return gid;
1268 }
1269
1270 /*
1271 * The averaged statistics, shared & private, memory & CPU,
1272 * occupy the first half of the array. The second half of the
1273 * array is for current counters, which are averaged into the
1274 * first set by task_numa_placement.
1275 */
1276 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
1277 {
1278 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
1279 }
1280
1281 static inline unsigned long task_faults(struct task_struct *p, int nid)
1282 {
1283 if (!p->numa_faults)
1284 return 0;
1285
1286 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1287 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1288 }
1289
1290 static inline unsigned long group_faults(struct task_struct *p, int nid)
1291 {
1292 struct numa_group *ng = deref_task_numa_group(p);
1293
1294 if (!ng)
1295 return 0;
1296
1297 return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1298 ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1299 }
1300
1301 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1302 {
1303 return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] +
1304 group->faults[task_faults_idx(NUMA_CPU, nid, 1)];
1305 }
1306
1307 static inline unsigned long group_faults_priv(struct numa_group *ng)
1308 {
1309 unsigned long faults = 0;
1310 int node;
1311
1312 for_each_online_node(node) {
1313 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
1314 }
1315
1316 return faults;
1317 }
1318
1319 static inline unsigned long group_faults_shared(struct numa_group *ng)
1320 {
1321 unsigned long faults = 0;
1322 int node;
1323
1324 for_each_online_node(node) {
1325 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
1326 }
1327
1328 return faults;
1329 }
1330
1331 /*
1332 * A node triggering more than 1/3 as many NUMA faults as the maximum is
1333 * considered part of a numa group's pseudo-interleaving set. Migrations
1334 * between these nodes are slowed down, to allow things to settle down.
1335 */
1336 #define ACTIVE_NODE_FRACTION 3
1337
1338 static bool numa_is_active_node(int nid, struct numa_group *ng)
1339 {
1340 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1341 }
1342
1343 /* Handle placement on systems where not all nodes are directly connected. */
1344 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1345 int lim_dist, bool task)
1346 {
1347 unsigned long score = 0;
1348 int node, max_dist;
1349
1350 /*
1351 * All nodes are directly connected, and the same distance
1352 * from each other. No need for fancy placement algorithms.
1353 */
1354 if (sched_numa_topology_type == NUMA_DIRECT)
1355 return 0;
1356
1357 /* sched_max_numa_distance may be changed in parallel. */
1358 max_dist = READ_ONCE(sched_max_numa_distance);
1359 /*
1360 * This code is called for each node, introducing N^2 complexity,
1361 * which should be ok given the number of nodes rarely exceeds 8.
1362 */
1363 for_each_online_node(node) {
1364 unsigned long faults;
1365 int dist = node_distance(nid, node);
1366
1367 /*
1368 * The furthest away nodes in the system are not interesting
1369 * for placement; nid was already counted.
1370 */
1371 if (dist >= max_dist || node == nid)
1372 continue;
1373
1374 /*
1375 * On systems with a backplane NUMA topology, compare groups
1376 * of nodes, and move tasks towards the group with the most
1377 * memory accesses. When comparing two nodes at distance
1378 * "hoplimit", only nodes closer by than "hoplimit" are part
1379 * of each group. Skip other nodes.
1380 */
1381 if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= lim_dist)
1382 continue;
1383
1384 /* Add up the faults from nearby nodes. */
1385 if (task)
1386 faults = task_faults(p, node);
1387 else
1388 faults = group_faults(p, node);
1389
1390 /*
1391 * On systems with a glueless mesh NUMA topology, there are
1392 * no fixed "groups of nodes". Instead, nodes that are not
1393 * directly connected bounce traffic through intermediate
1394 * nodes; a numa_group can occupy any set of nodes.
1395 * The further away a node is, the less the faults count.
1396 * This seems to result in good task placement.
1397 */
1398 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1399 faults *= (max_dist - dist);
1400 faults /= (max_dist - LOCAL_DISTANCE);
1401 }
1402
1403 score += faults;
1404 }
1405
1406 return score;
1407 }
1408
1409 /*
1410 * These return the fraction of accesses done by a particular task, or
1411 * task group, on a particular numa node. The group weight is given a
1412 * larger multiplier, in order to group tasks together that are almost
1413 * evenly spread out between numa nodes.
1414 */
1415 static inline unsigned long task_weight(struct task_struct *p, int nid,
1416 int dist)
1417 {
1418 unsigned long faults, total_faults;
1419
1420 if (!p->numa_faults)
1421 return 0;
1422
1423 total_faults = p->total_numa_faults;
1424
1425 if (!total_faults)
1426 return 0;
1427
1428 faults = task_faults(p, nid);
1429 faults += score_nearby_nodes(p, nid, dist, true);
1430
1431 return 1000 * faults / total_faults;
1432 }
1433
1434 static inline unsigned long group_weight(struct task_struct *p, int nid,
1435 int dist)
1436 {
1437 struct numa_group *ng = deref_task_numa_group(p);
1438 unsigned long faults, total_faults;
1439
1440 if (!ng)
1441 return 0;
1442
1443 total_faults = ng->total_faults;
1444
1445 if (!total_faults)
1446 return 0;
1447
1448 faults = group_faults(p, nid);
1449 faults += score_nearby_nodes(p, nid, dist, false);
1450
1451 return 1000 * faults / total_faults;
1452 }
1453
1454 /*
1455 * If memory tiering mode is enabled, cpupid of slow memory page is
1456 * used to record scan time instead of CPU and PID. When tiering mode
1457 * is disabled at run time, the scan time (in cpupid) will be
1458 * interpreted as CPU and PID. So CPU needs to be checked to avoid to
1459 * access out of array bound.
1460 */
1461 static inline bool cpupid_valid(int cpupid)
1462 {
1463 return cpupid_to_cpu(cpupid) < nr_cpu_ids;
1464 }
1465
1466 /*
1467 * For memory tiering mode, if there are enough free pages (more than
1468 * enough watermark defined here) in fast memory node, to take full
1469 * advantage of fast memory capacity, all recently accessed slow
1470 * memory pages will be migrated to fast memory node without
1471 * considering hot threshold.
1472 */
1473 static bool pgdat_free_space_enough(struct pglist_data *pgdat)
1474 {
1475 int z;
1476 unsigned long enough_wmark;
1477
1478 enough_wmark = max(1UL * 1024 * 1024 * 1024 >> PAGE_SHIFT,
1479 pgdat->node_present_pages >> 4);
1480 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1481 struct zone *zone = pgdat->node_zones + z;
1482
1483 if (!populated_zone(zone))
1484 continue;
1485
1486 if (zone_watermark_ok(zone, 0,
1487 wmark_pages(zone, WMARK_PROMO) + enough_wmark,
1488 ZONE_MOVABLE, 0))
1489 return true;
1490 }
1491 return false;
1492 }
1493
1494 /*
1495 * For memory tiering mode, when page tables are scanned, the scan
1496 * time will be recorded in struct page in addition to make page
1497 * PROT_NONE for slow memory page. So when the page is accessed, in
1498 * hint page fault handler, the hint page fault latency is calculated
1499 * via,
1500 *
1501 * hint page fault latency = hint page fault time - scan time
1502 *
1503 * The smaller the hint page fault latency, the higher the possibility
1504 * for the page to be hot.
1505 */
1506 static int numa_hint_fault_latency(struct page *page)
1507 {
1508 int last_time, time;
1509
1510 time = jiffies_to_msecs(jiffies);
1511 last_time = xchg_page_access_time(page, time);
1512
1513 return (time - last_time) & PAGE_ACCESS_TIME_MASK;
1514 }
1515
1516 /*
1517 * For memory tiering mode, too high promotion/demotion throughput may
1518 * hurt application latency. So we provide a mechanism to rate limit
1519 * the number of pages that are tried to be promoted.
1520 */
1521 static bool numa_promotion_rate_limit(struct pglist_data *pgdat,
1522 unsigned long rate_limit, int nr)
1523 {
1524 unsigned long nr_cand;
1525 unsigned int now, start;
1526
1527 now = jiffies_to_msecs(jiffies);
1528 mod_node_page_state(pgdat, PGPROMOTE_CANDIDATE, nr);
1529 nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
1530 start = pgdat->nbp_rl_start;
1531 if (now - start > MSEC_PER_SEC &&
1532 cmpxchg(&pgdat->nbp_rl_start, start, now) == start)
1533 pgdat->nbp_rl_nr_cand = nr_cand;
1534 if (nr_cand - pgdat->nbp_rl_nr_cand >= rate_limit)
1535 return true;
1536 return false;
1537 }
1538
1539 #define NUMA_MIGRATION_ADJUST_STEPS 16
1540
1541 static void numa_promotion_adjust_threshold(struct pglist_data *pgdat,
1542 unsigned long rate_limit,
1543 unsigned int ref_th)
1544 {
1545 unsigned int now, start, th_period, unit_th, th;
1546 unsigned long nr_cand, ref_cand, diff_cand;
1547
1548 now = jiffies_to_msecs(jiffies);
1549 th_period = sysctl_numa_balancing_scan_period_max;
1550 start = pgdat->nbp_th_start;
1551 if (now - start > th_period &&
1552 cmpxchg(&pgdat->nbp_th_start, start, now) == start) {
1553 ref_cand = rate_limit *
1554 sysctl_numa_balancing_scan_period_max / MSEC_PER_SEC;
1555 nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
1556 diff_cand = nr_cand - pgdat->nbp_th_nr_cand;
1557 unit_th = ref_th * 2 / NUMA_MIGRATION_ADJUST_STEPS;
1558 th = pgdat->nbp_threshold ? : ref_th;
1559 if (diff_cand > ref_cand * 11 / 10)
1560 th = max(th - unit_th, unit_th);
1561 else if (diff_cand < ref_cand * 9 / 10)
1562 th = min(th + unit_th, ref_th * 2);
1563 pgdat->nbp_th_nr_cand = nr_cand;
1564 pgdat->nbp_threshold = th;
1565 }
1566 }
1567
1568 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1569 int src_nid, int dst_cpu)
1570 {
1571 struct numa_group *ng = deref_curr_numa_group(p);
1572 int dst_nid = cpu_to_node(dst_cpu);
1573 int last_cpupid, this_cpupid;
1574
1575 /*
1576 * The pages in slow memory node should be migrated according
1577 * to hot/cold instead of private/shared.
1578 */
1579 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
1580 !node_is_toptier(src_nid)) {
1581 struct pglist_data *pgdat;
1582 unsigned long rate_limit;
1583 unsigned int latency, th, def_th;
1584
1585 pgdat = NODE_DATA(dst_nid);
1586 if (pgdat_free_space_enough(pgdat)) {
1587 /* workload changed, reset hot threshold */
1588 pgdat->nbp_threshold = 0;
1589 return true;
1590 }
1591
1592 def_th = sysctl_numa_balancing_hot_threshold;
1593 rate_limit = sysctl_numa_balancing_promote_rate_limit << \
1594 (20 - PAGE_SHIFT);
1595 numa_promotion_adjust_threshold(pgdat, rate_limit, def_th);
1596
1597 th = pgdat->nbp_threshold ? : def_th;
1598 latency = numa_hint_fault_latency(page);
1599 if (latency >= th)
1600 return false;
1601
1602 return !numa_promotion_rate_limit(pgdat, rate_limit,
1603 thp_nr_pages(page));
1604 }
1605
1606 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1607 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1608
1609 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
1610 !node_is_toptier(src_nid) && !cpupid_valid(last_cpupid))
1611 return false;
1612
1613 /*
1614 * Allow first faults or private faults to migrate immediately early in
1615 * the lifetime of a task. The magic number 4 is based on waiting for
1616 * two full passes of the "multi-stage node selection" test that is
1617 * executed below.
1618 */
1619 if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
1620 (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
1621 return true;
1622
1623 /*
1624 * Multi-stage node selection is used in conjunction with a periodic
1625 * migration fault to build a temporal task<->page relation. By using
1626 * a two-stage filter we remove short/unlikely relations.
1627 *
1628 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1629 * a task's usage of a particular page (n_p) per total usage of this
1630 * page (n_t) (in a given time-span) to a probability.
1631 *
1632 * Our periodic faults will sample this probability and getting the
1633 * same result twice in a row, given these samples are fully
1634 * independent, is then given by P(n)^2, provided our sample period
1635 * is sufficiently short compared to the usage pattern.
1636 *
1637 * This quadric squishes small probabilities, making it less likely we
1638 * act on an unlikely task<->page relation.
1639 */
1640 if (!cpupid_pid_unset(last_cpupid) &&
1641 cpupid_to_nid(last_cpupid) != dst_nid)
1642 return false;
1643
1644 /* Always allow migrate on private faults */
1645 if (cpupid_match_pid(p, last_cpupid))
1646 return true;
1647
1648 /* A shared fault, but p->numa_group has not been set up yet. */
1649 if (!ng)
1650 return true;
1651
1652 /*
1653 * Destination node is much more heavily used than the source
1654 * node? Allow migration.
1655 */
1656 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1657 ACTIVE_NODE_FRACTION)
1658 return true;
1659
1660 /*
1661 * Distribute memory according to CPU & memory use on each node,
1662 * with 3/4 hysteresis to avoid unnecessary memory migrations:
1663 *
1664 * faults_cpu(dst) 3 faults_cpu(src)
1665 * --------------- * - > ---------------
1666 * faults_mem(dst) 4 faults_mem(src)
1667 */
1668 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1669 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1670 }
1671
1672 /*
1673 * 'numa_type' describes the node at the moment of load balancing.
1674 */
1675 enum numa_type {
1676 /* The node has spare capacity that can be used to run more tasks. */
1677 node_has_spare = 0,
1678 /*
1679 * The node is fully used and the tasks don't compete for more CPU
1680 * cycles. Nevertheless, some tasks might wait before running.
1681 */
1682 node_fully_busy,
1683 /*
1684 * The node is overloaded and can't provide expected CPU cycles to all
1685 * tasks.
1686 */
1687 node_overloaded
1688 };
1689
1690 /* Cached statistics for all CPUs within a node */
1691 struct numa_stats {
1692 unsigned long load;
1693 unsigned long runnable;
1694 unsigned long util;
1695 /* Total compute capacity of CPUs on a node */
1696 unsigned long compute_capacity;
1697 unsigned int nr_running;
1698 unsigned int weight;
1699 enum numa_type node_type;
1700 int idle_cpu;
1701 };
1702
1703 static inline bool is_core_idle(int cpu)
1704 {
1705 #ifdef CONFIG_SCHED_SMT
1706 int sibling;
1707
1708 for_each_cpu(sibling, cpu_smt_mask(cpu)) {
1709 if (cpu == sibling)
1710 continue;
1711
1712 if (!idle_cpu(sibling))
1713 return false;
1714 }
1715 #endif
1716
1717 return true;
1718 }
1719
1720 struct task_numa_env {
1721 struct task_struct *p;
1722
1723 int src_cpu, src_nid;
1724 int dst_cpu, dst_nid;
1725 int imb_numa_nr;
1726
1727 struct numa_stats src_stats, dst_stats;
1728
1729 int imbalance_pct;
1730 int dist;
1731
1732 struct task_struct *best_task;
1733 long best_imp;
1734 int best_cpu;
1735 };
1736
1737 static unsigned long cpu_load(struct rq *rq);
1738 static unsigned long cpu_runnable(struct rq *rq);
1739
1740 static inline enum
1741 numa_type numa_classify(unsigned int imbalance_pct,
1742 struct numa_stats *ns)
1743 {
1744 if ((ns->nr_running > ns->weight) &&
1745 (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
1746 ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
1747 return node_overloaded;
1748
1749 if ((ns->nr_running < ns->weight) ||
1750 (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
1751 ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
1752 return node_has_spare;
1753
1754 return node_fully_busy;
1755 }
1756
1757 #ifdef CONFIG_SCHED_SMT
1758 /* Forward declarations of select_idle_sibling helpers */
1759 static inline bool test_idle_cores(int cpu);
1760 static inline int numa_idle_core(int idle_core, int cpu)
1761 {
1762 if (!static_branch_likely(&sched_smt_present) ||
1763 idle_core >= 0 || !test_idle_cores(cpu))
1764 return idle_core;
1765
1766 /*
1767 * Prefer cores instead of packing HT siblings
1768 * and triggering future load balancing.
1769 */
1770 if (is_core_idle(cpu))
1771 idle_core = cpu;
1772
1773 return idle_core;
1774 }
1775 #else
1776 static inline int numa_idle_core(int idle_core, int cpu)
1777 {
1778 return idle_core;
1779 }
1780 #endif
1781
1782 /*
1783 * Gather all necessary information to make NUMA balancing placement
1784 * decisions that are compatible with standard load balancer. This
1785 * borrows code and logic from update_sg_lb_stats but sharing a
1786 * common implementation is impractical.
1787 */
1788 static void update_numa_stats(struct task_numa_env *env,
1789 struct numa_stats *ns, int nid,
1790 bool find_idle)
1791 {
1792 int cpu, idle_core = -1;
1793
1794 memset(ns, 0, sizeof(*ns));
1795 ns->idle_cpu = -1;
1796
1797 rcu_read_lock();
1798 for_each_cpu(cpu, cpumask_of_node(nid)) {
1799 struct rq *rq = cpu_rq(cpu);
1800
1801 ns->load += cpu_load(rq);
1802 ns->runnable += cpu_runnable(rq);
1803 ns->util += cpu_util_cfs(cpu);
1804 ns->nr_running += rq->cfs.h_nr_running;
1805 ns->compute_capacity += capacity_of(cpu);
1806
1807 if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
1808 if (READ_ONCE(rq->numa_migrate_on) ||
1809 !cpumask_test_cpu(cpu, env->p->cpus_ptr))
1810 continue;
1811
1812 if (ns->idle_cpu == -1)
1813 ns->idle_cpu = cpu;
1814
1815 idle_core = numa_idle_core(idle_core, cpu);
1816 }
1817 }
1818 rcu_read_unlock();
1819
1820 ns->weight = cpumask_weight(cpumask_of_node(nid));
1821
1822 ns->node_type = numa_classify(env->imbalance_pct, ns);
1823
1824 if (idle_core >= 0)
1825 ns->idle_cpu = idle_core;
1826 }
1827
1828 static void task_numa_assign(struct task_numa_env *env,
1829 struct task_struct *p, long imp)
1830 {
1831 struct rq *rq = cpu_rq(env->dst_cpu);
1832
1833 /* Check if run-queue part of active NUMA balance. */
1834 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
1835 int cpu;
1836 int start = env->dst_cpu;
1837
1838 /* Find alternative idle CPU. */
1839 for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start + 1) {
1840 if (cpu == env->best_cpu || !idle_cpu(cpu) ||
1841 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
1842 continue;
1843 }
1844
1845 env->dst_cpu = cpu;
1846 rq = cpu_rq(env->dst_cpu);
1847 if (!xchg(&rq->numa_migrate_on, 1))
1848 goto assign;
1849 }
1850
1851 /* Failed to find an alternative idle CPU */
1852 return;
1853 }
1854
1855 assign:
1856 /*
1857 * Clear previous best_cpu/rq numa-migrate flag, since task now
1858 * found a better CPU to move/swap.
1859 */
1860 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
1861 rq = cpu_rq(env->best_cpu);
1862 WRITE_ONCE(rq->numa_migrate_on, 0);
1863 }
1864
1865 if (env->best_task)
1866 put_task_struct(env->best_task);
1867 if (p)
1868 get_task_struct(p);
1869
1870 env->best_task = p;
1871 env->best_imp = imp;
1872 env->best_cpu = env->dst_cpu;
1873 }
1874
1875 static bool load_too_imbalanced(long src_load, long dst_load,
1876 struct task_numa_env *env)
1877 {
1878 long imb, old_imb;
1879 long orig_src_load, orig_dst_load;
1880 long src_capacity, dst_capacity;
1881
1882 /*
1883 * The load is corrected for the CPU capacity available on each node.
1884 *
1885 * src_load dst_load
1886 * ------------ vs ---------
1887 * src_capacity dst_capacity
1888 */
1889 src_capacity = env->src_stats.compute_capacity;
1890 dst_capacity = env->dst_stats.compute_capacity;
1891
1892 imb = abs(dst_load * src_capacity - src_load * dst_capacity);
1893
1894 orig_src_load = env->src_stats.load;
1895 orig_dst_load = env->dst_stats.load;
1896
1897 old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
1898
1899 /* Would this change make things worse? */
1900 return (imb > old_imb);
1901 }
1902
1903 /*
1904 * Maximum NUMA importance can be 1998 (2*999);
1905 * SMALLIMP @ 30 would be close to 1998/64.
1906 * Used to deter task migration.
1907 */
1908 #define SMALLIMP 30
1909
1910 /*
1911 * This checks if the overall compute and NUMA accesses of the system would
1912 * be improved if the source tasks was migrated to the target dst_cpu taking
1913 * into account that it might be best if task running on the dst_cpu should
1914 * be exchanged with the source task
1915 */
1916 static bool task_numa_compare(struct task_numa_env *env,
1917 long taskimp, long groupimp, bool maymove)
1918 {
1919 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
1920 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1921 long imp = p_ng ? groupimp : taskimp;
1922 struct task_struct *cur;
1923 long src_load, dst_load;
1924 int dist = env->dist;
1925 long moveimp = imp;
1926 long load;
1927 bool stopsearch = false;
1928
1929 if (READ_ONCE(dst_rq->numa_migrate_on))
1930 return false;
1931
1932 rcu_read_lock();
1933 cur = rcu_dereference(dst_rq->curr);
1934 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
1935 cur = NULL;
1936
1937 /*
1938 * Because we have preemption enabled we can get migrated around and
1939 * end try selecting ourselves (current == env->p) as a swap candidate.
1940 */
1941 if (cur == env->p) {
1942 stopsearch = true;
1943 goto unlock;
1944 }
1945
1946 if (!cur) {
1947 if (maymove && moveimp >= env->best_imp)
1948 goto assign;
1949 else
1950 goto unlock;
1951 }
1952
1953 /* Skip this swap candidate if cannot move to the source cpu. */
1954 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
1955 goto unlock;
1956
1957 /*
1958 * Skip this swap candidate if it is not moving to its preferred
1959 * node and the best task is.
1960 */
1961 if (env->best_task &&
1962 env->best_task->numa_preferred_nid == env->src_nid &&
1963 cur->numa_preferred_nid != env->src_nid) {
1964 goto unlock;
1965 }
1966
1967 /*
1968 * "imp" is the fault differential for the source task between the
1969 * source and destination node. Calculate the total differential for
1970 * the source task and potential destination task. The more negative
1971 * the value is, the more remote accesses that would be expected to
1972 * be incurred if the tasks were swapped.
1973 *
1974 * If dst and source tasks are in the same NUMA group, or not
1975 * in any group then look only at task weights.
1976 */
1977 cur_ng = rcu_dereference(cur->numa_group);
1978 if (cur_ng == p_ng) {
1979 /*
1980 * Do not swap within a group or between tasks that have
1981 * no group if there is spare capacity. Swapping does
1982 * not address the load imbalance and helps one task at
1983 * the cost of punishing another.
1984 */
1985 if (env->dst_stats.node_type == node_has_spare)
1986 goto unlock;
1987
1988 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1989 task_weight(cur, env->dst_nid, dist);
1990 /*
1991 * Add some hysteresis to prevent swapping the
1992 * tasks within a group over tiny differences.
1993 */
1994 if (cur_ng)
1995 imp -= imp / 16;
1996 } else {
1997 /*
1998 * Compare the group weights. If a task is all by itself
1999 * (not part of a group), use the task weight instead.
2000 */
2001 if (cur_ng && p_ng)
2002 imp += group_weight(cur, env->src_nid, dist) -
2003 group_weight(cur, env->dst_nid, dist);
2004 else
2005 imp += task_weight(cur, env->src_nid, dist) -
2006 task_weight(cur, env->dst_nid, dist);
2007 }
2008
2009 /* Discourage picking a task already on its preferred node */
2010 if (cur->numa_preferred_nid == env->dst_nid)
2011 imp -= imp / 16;
2012
2013 /*
2014 * Encourage picking a task that moves to its preferred node.
2015 * This potentially makes imp larger than it's maximum of
2016 * 1998 (see SMALLIMP and task_weight for why) but in this
2017 * case, it does not matter.
2018 */
2019 if (cur->numa_preferred_nid == env->src_nid)
2020 imp += imp / 8;
2021
2022 if (maymove && moveimp > imp && moveimp > env->best_imp) {
2023 imp = moveimp;
2024 cur = NULL;
2025 goto assign;
2026 }
2027
2028 /*
2029 * Prefer swapping with a task moving to its preferred node over a
2030 * task that is not.
2031 */
2032 if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
2033 env->best_task->numa_preferred_nid != env->src_nid) {
2034 goto assign;
2035 }
2036
2037 /*
2038 * If the NUMA importance is less than SMALLIMP,
2039 * task migration might only result in ping pong
2040 * of tasks and also hurt performance due to cache
2041 * misses.
2042 */
2043 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
2044 goto unlock;
2045
2046 /*
2047 * In the overloaded case, try and keep the load balanced.
2048 */
2049 load = task_h_load(env->p) - task_h_load(cur);
2050 if (!load)
2051 goto assign;
2052
2053 dst_load = env->dst_stats.load + load;
2054 src_load = env->src_stats.load - load;
2055
2056 if (load_too_imbalanced(src_load, dst_load, env))
2057 goto unlock;
2058
2059 assign:
2060 /* Evaluate an idle CPU for a task numa move. */
2061 if (!cur) {
2062 int cpu = env->dst_stats.idle_cpu;
2063
2064 /* Nothing cached so current CPU went idle since the search. */
2065 if (cpu < 0)
2066 cpu = env->dst_cpu;
2067
2068 /*
2069 * If the CPU is no longer truly idle and the previous best CPU
2070 * is, keep using it.
2071 */
2072 if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
2073 idle_cpu(env->best_cpu)) {
2074 cpu = env->best_cpu;
2075 }
2076
2077 env->dst_cpu = cpu;
2078 }
2079
2080 task_numa_assign(env, cur, imp);
2081
2082 /*
2083 * If a move to idle is allowed because there is capacity or load
2084 * balance improves then stop the search. While a better swap
2085 * candidate may exist, a search is not free.
2086 */
2087 if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
2088 stopsearch = true;
2089
2090 /*
2091 * If a swap candidate must be identified and the current best task
2092 * moves its preferred node then stop the search.
2093 */
2094 if (!maymove && env->best_task &&
2095 env->best_task->numa_preferred_nid == env->src_nid) {
2096 stopsearch = true;
2097 }
2098 unlock:
2099 rcu_read_unlock();
2100
2101 return stopsearch;
2102 }
2103
2104 static void task_numa_find_cpu(struct task_numa_env *env,
2105 long taskimp, long groupimp)
2106 {
2107 bool maymove = false;
2108 int cpu;
2109
2110 /*
2111 * If dst node has spare capacity, then check if there is an
2112 * imbalance that would be overruled by the load balancer.
2113 */
2114 if (env->dst_stats.node_type == node_has_spare) {
2115 unsigned int imbalance;
2116 int src_running, dst_running;
2117
2118 /*
2119 * Would movement cause an imbalance? Note that if src has
2120 * more running tasks that the imbalance is ignored as the
2121 * move improves the imbalance from the perspective of the
2122 * CPU load balancer.
2123 * */
2124 src_running = env->src_stats.nr_running - 1;
2125 dst_running = env->dst_stats.nr_running + 1;
2126 imbalance = max(0, dst_running - src_running);
2127 imbalance = adjust_numa_imbalance(imbalance, dst_running,
2128 env->imb_numa_nr);
2129
2130 /* Use idle CPU if there is no imbalance */
2131 if (!imbalance) {
2132 maymove = true;
2133 if (env->dst_stats.idle_cpu >= 0) {
2134 env->dst_cpu = env->dst_stats.idle_cpu;
2135 task_numa_assign(env, NULL, 0);
2136 return;
2137 }
2138 }
2139 } else {
2140 long src_load, dst_load, load;
2141 /*
2142 * If the improvement from just moving env->p direction is better
2143 * than swapping tasks around, check if a move is possible.
2144 */
2145 load = task_h_load(env->p);
2146 dst_load = env->dst_stats.load + load;
2147 src_load = env->src_stats.load - load;
2148 maymove = !load_too_imbalanced(src_load, dst_load, env);
2149 }
2150
2151 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
2152 /* Skip this CPU if the source task cannot migrate */
2153 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
2154 continue;
2155
2156 env->dst_cpu = cpu;
2157 if (task_numa_compare(env, taskimp, groupimp, maymove))
2158 break;
2159 }
2160 }
2161
2162 static int task_numa_migrate(struct task_struct *p)
2163 {
2164 struct task_numa_env env = {
2165 .p = p,
2166
2167 .src_cpu = task_cpu(p),
2168 .src_nid = task_node(p),
2169
2170 .imbalance_pct = 112,
2171
2172 .best_task = NULL,
2173 .best_imp = 0,
2174 .best_cpu = -1,
2175 };
2176 unsigned long taskweight, groupweight;
2177 struct sched_domain *sd;
2178 long taskimp, groupimp;
2179 struct numa_group *ng;
2180 struct rq *best_rq;
2181 int nid, ret, dist;
2182
2183 /*
2184 * Pick the lowest SD_NUMA domain, as that would have the smallest
2185 * imbalance and would be the first to start moving tasks about.
2186 *
2187 * And we want to avoid any moving of tasks about, as that would create
2188 * random movement of tasks -- counter the numa conditions we're trying
2189 * to satisfy here.
2190 */
2191 rcu_read_lock();
2192 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
2193 if (sd) {
2194 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
2195 env.imb_numa_nr = sd->imb_numa_nr;
2196 }
2197 rcu_read_unlock();
2198
2199 /*
2200 * Cpusets can break the scheduler domain tree into smaller
2201 * balance domains, some of which do not cross NUMA boundaries.
2202 * Tasks that are "trapped" in such domains cannot be migrated
2203 * elsewhere, so there is no point in (re)trying.
2204 */
2205 if (unlikely(!sd)) {
2206 sched_setnuma(p, task_node(p));
2207 return -EINVAL;
2208 }
2209
2210 env.dst_nid = p->numa_preferred_nid;
2211 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
2212 taskweight = task_weight(p, env.src_nid, dist);
2213 groupweight = group_weight(p, env.src_nid, dist);
2214 update_numa_stats(&env, &env.src_stats, env.src_nid, false);
2215 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
2216 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
2217 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2218
2219 /* Try to find a spot on the preferred nid. */
2220 task_numa_find_cpu(&env, taskimp, groupimp);
2221
2222 /*
2223 * Look at other nodes in these cases:
2224 * - there is no space available on the preferred_nid
2225 * - the task is part of a numa_group that is interleaved across
2226 * multiple NUMA nodes; in order to better consolidate the group,
2227 * we need to check other locations.
2228 */
2229 ng = deref_curr_numa_group(p);
2230 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
2231 for_each_node_state(nid, N_CPU) {
2232 if (nid == env.src_nid || nid == p->numa_preferred_nid)
2233 continue;
2234
2235 dist = node_distance(env.src_nid, env.dst_nid);
2236 if (sched_numa_topology_type == NUMA_BACKPLANE &&
2237 dist != env.dist) {
2238 taskweight = task_weight(p, env.src_nid, dist);
2239 groupweight = group_weight(p, env.src_nid, dist);
2240 }
2241
2242 /* Only consider nodes where both task and groups benefit */
2243 taskimp = task_weight(p, nid, dist) - taskweight;
2244 groupimp = group_weight(p, nid, dist) - groupweight;
2245 if (taskimp < 0 && groupimp < 0)
2246 continue;
2247
2248 env.dist = dist;
2249 env.dst_nid = nid;
2250 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
2251 task_numa_find_cpu(&env, taskimp, groupimp);
2252 }
2253 }
2254
2255 /*
2256 * If the task is part of a workload that spans multiple NUMA nodes,
2257 * and is migrating into one of the workload's active nodes, remember
2258 * this node as the task's preferred numa node, so the workload can
2259 * settle down.
2260 * A task that migrated to a second choice node will be better off
2261 * trying for a better one later. Do not set the preferred node here.
2262 */
2263 if (ng) {
2264 if (env.best_cpu == -1)
2265 nid = env.src_nid;
2266 else
2267 nid = cpu_to_node(env.best_cpu);
2268
2269 if (nid != p->numa_preferred_nid)
2270 sched_setnuma(p, nid);
2271 }
2272
2273 /* No better CPU than the current one was found. */
2274 if (env.best_cpu == -1) {
2275 trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
2276 return -EAGAIN;
2277 }
2278
2279 best_rq = cpu_rq(env.best_cpu);
2280 if (env.best_task == NULL) {
2281 ret = migrate_task_to(p, env.best_cpu);
2282 WRITE_ONCE(best_rq->numa_migrate_on, 0);
2283 if (ret != 0)
2284 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
2285 return ret;
2286 }
2287
2288 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
2289 WRITE_ONCE(best_rq->numa_migrate_on, 0);
2290
2291 if (ret != 0)
2292 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
2293 put_task_struct(env.best_task);
2294 return ret;
2295 }
2296
2297 /* Attempt to migrate a task to a CPU on the preferred node. */
2298 static void numa_migrate_preferred(struct task_struct *p)
2299 {
2300 unsigned long interval = HZ;
2301
2302 /* This task has no NUMA fault statistics yet */
2303 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
2304 return;
2305
2306 /* Periodically retry migrating the task to the preferred node */
2307 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
2308 p->numa_migrate_retry = jiffies + interval;
2309
2310 /* Success if task is already running on preferred CPU */
2311 if (task_node(p) == p->numa_preferred_nid)
2312 return;
2313
2314 /* Otherwise, try migrate to a CPU on the preferred node */
2315 task_numa_migrate(p);
2316 }
2317
2318 /*
2319 * Find out how many nodes the workload is actively running on. Do this by
2320 * tracking the nodes from which NUMA hinting faults are triggered. This can
2321 * be different from the set of nodes where the workload's memory is currently
2322 * located.
2323 */
2324 static void numa_group_count_active_nodes(struct numa_group *numa_group)
2325 {
2326 unsigned long faults, max_faults = 0;
2327 int nid, active_nodes = 0;
2328
2329 for_each_node_state(nid, N_CPU) {
2330 faults = group_faults_cpu(numa_group, nid);
2331 if (faults > max_faults)
2332 max_faults = faults;
2333 }
2334
2335 for_each_node_state(nid, N_CPU) {
2336 faults = group_faults_cpu(numa_group, nid);
2337 if (faults * ACTIVE_NODE_FRACTION > max_faults)
2338 active_nodes++;
2339 }
2340
2341 numa_group->max_faults_cpu = max_faults;
2342 numa_group->active_nodes = active_nodes;
2343 }
2344
2345 /*
2346 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
2347 * increments. The more local the fault statistics are, the higher the scan
2348 * period will be for the next scan window. If local/(local+remote) ratio is
2349 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
2350 * the scan period will decrease. Aim for 70% local accesses.
2351 */
2352 #define NUMA_PERIOD_SLOTS 10
2353 #define NUMA_PERIOD_THRESHOLD 7
2354
2355 /*
2356 * Increase the scan period (slow down scanning) if the majority of
2357 * our memory is already on our local node, or if the majority of
2358 * the page accesses are shared with other processes.
2359 * Otherwise, decrease the scan period.
2360 */
2361 static void update_task_scan_period(struct task_struct *p,
2362 unsigned long shared, unsigned long private)
2363 {
2364 unsigned int period_slot;
2365 int lr_ratio, ps_ratio;
2366 int diff;
2367
2368 unsigned long remote = p->numa_faults_locality[0];
2369 unsigned long local = p->numa_faults_locality[1];
2370
2371 /*
2372 * If there were no record hinting faults then either the task is
2373 * completely idle or all activity is in areas that are not of interest
2374 * to automatic numa balancing. Related to that, if there were failed
2375 * migration then it implies we are migrating too quickly or the local
2376 * node is overloaded. In either case, scan slower
2377 */
2378 if (local + shared == 0 || p->numa_faults_locality[2]) {
2379 p->numa_scan_period = min(p->numa_scan_period_max,
2380 p->numa_scan_period << 1);
2381
2382 p->mm->numa_next_scan = jiffies +
2383 msecs_to_jiffies(p->numa_scan_period);
2384
2385 return;
2386 }
2387
2388 /*
2389 * Prepare to scale scan period relative to the current period.
2390 * == NUMA_PERIOD_THRESHOLD scan period stays the same
2391 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
2392 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
2393 */
2394 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
2395 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
2396 ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
2397
2398 if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
2399 /*
2400 * Most memory accesses are local. There is no need to
2401 * do fast NUMA scanning, since memory is already local.
2402 */
2403 int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
2404 if (!slot)
2405 slot = 1;
2406 diff = slot * period_slot;
2407 } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
2408 /*
2409 * Most memory accesses are shared with other tasks.
2410 * There is no point in continuing fast NUMA scanning,
2411 * since other tasks may just move the memory elsewhere.
2412 */
2413 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
2414 if (!slot)
2415 slot = 1;
2416 diff = slot * period_slot;
2417 } else {
2418 /*
2419 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
2420 * yet they are not on the local NUMA node. Speed up
2421 * NUMA scanning to get the memory moved over.
2422 */
2423 int ratio = max(lr_ratio, ps_ratio);
2424 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
2425 }
2426
2427 p->numa_scan_period = clamp(p->numa_scan_period + diff,
2428 task_scan_min(p), task_scan_max(p));
2429 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2430 }
2431
2432 /*
2433 * Get the fraction of time the task has been running since the last
2434 * NUMA placement cycle. The scheduler keeps similar statistics, but
2435 * decays those on a 32ms period, which is orders of magnitude off
2436 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
2437 * stats only if the task is so new there are no NUMA statistics yet.
2438 */
2439 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2440 {
2441 u64 runtime, delta, now;
2442 /* Use the start of this time slice to avoid calculations. */
2443 now = p->se.exec_start;
2444 runtime = p->se.sum_exec_runtime;
2445
2446 if (p->last_task_numa_placement) {
2447 delta = runtime - p->last_sum_exec_runtime;
2448 *period = now - p->last_task_numa_placement;
2449
2450 /* Avoid time going backwards, prevent potential divide error: */
2451 if (unlikely((s64)*period < 0))
2452 *period = 0;
2453 } else {
2454 delta = p->se.avg.load_sum;
2455 *period = LOAD_AVG_MAX;
2456 }
2457
2458 p->last_sum_exec_runtime = runtime;
2459 p->last_task_numa_placement = now;
2460
2461 return delta;
2462 }
2463
2464 /*
2465 * Determine the preferred nid for a task in a numa_group. This needs to
2466 * be done in a way that produces consistent results with group_weight,
2467 * otherwise workloads might not converge.
2468 */
2469 static int preferred_group_nid(struct task_struct *p, int nid)
2470 {
2471 nodemask_t nodes;
2472 int dist;
2473
2474 /* Direct connections between all NUMA nodes. */
2475 if (sched_numa_topology_type == NUMA_DIRECT)
2476 return nid;
2477
2478 /*
2479 * On a system with glueless mesh NUMA topology, group_weight
2480 * scores nodes according to the number of NUMA hinting faults on
2481 * both the node itself, and on nearby nodes.
2482 */
2483 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2484 unsigned long score, max_score = 0;
2485 int node, max_node = nid;
2486
2487 dist = sched_max_numa_distance;
2488
2489 for_each_node_state(node, N_CPU) {
2490 score = group_weight(p, node, dist);
2491 if (score > max_score) {
2492 max_score = score;
2493 max_node = node;
2494 }
2495 }
2496 return max_node;
2497 }
2498
2499 /*
2500 * Finding the preferred nid in a system with NUMA backplane
2501 * interconnect topology is more involved. The goal is to locate
2502 * tasks from numa_groups near each other in the system, and
2503 * untangle workloads from different sides of the system. This requires
2504 * searching down the hierarchy of node groups, recursively searching
2505 * inside the highest scoring group of nodes. The nodemask tricks
2506 * keep the complexity of the search down.
2507 */
2508 nodes = node_states[N_CPU];
2509 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2510 unsigned long max_faults = 0;
2511 nodemask_t max_group = NODE_MASK_NONE;
2512 int a, b;
2513
2514 /* Are there nodes at this distance from each other? */
2515 if (!find_numa_distance(dist))
2516 continue;
2517
2518 for_each_node_mask(a, nodes) {
2519 unsigned long faults = 0;
2520 nodemask_t this_group;
2521 nodes_clear(this_group);
2522
2523 /* Sum group's NUMA faults; includes a==b case. */
2524 for_each_node_mask(b, nodes) {
2525 if (node_distance(a, b) < dist) {
2526 faults += group_faults(p, b);
2527 node_set(b, this_group);
2528 node_clear(b, nodes);
2529 }
2530 }
2531
2532 /* Remember the top group. */
2533 if (faults > max_faults) {
2534 max_faults = faults;
2535 max_group = this_group;
2536 /*
2537 * subtle: at the smallest distance there is
2538 * just one node left in each "group", the
2539 * winner is the preferred nid.
2540 */
2541 nid = a;
2542 }
2543 }
2544 /* Next round, evaluate the nodes within max_group. */
2545 if (!max_faults)
2546 break;
2547 nodes = max_group;
2548 }
2549 return nid;
2550 }
2551
2552 static void task_numa_placement(struct task_struct *p)
2553 {
2554 int seq, nid, max_nid = NUMA_NO_NODE;
2555 unsigned long max_faults = 0;
2556 unsigned long fault_types[2] = { 0, 0 };
2557 unsigned long total_faults;
2558 u64 runtime, period;
2559 spinlock_t *group_lock = NULL;
2560 struct numa_group *ng;
2561
2562 /*
2563 * The p->mm->numa_scan_seq field gets updated without
2564 * exclusive access. Use READ_ONCE() here to ensure
2565 * that the field is read in a single access:
2566 */
2567 seq = READ_ONCE(p->mm->numa_scan_seq);
2568 if (p->numa_scan_seq == seq)
2569 return;
2570 p->numa_scan_seq = seq;
2571 p->numa_scan_period_max = task_scan_max(p);
2572
2573 total_faults = p->numa_faults_locality[0] +
2574 p->numa_faults_locality[1];
2575 runtime = numa_get_avg_runtime(p, &period);
2576
2577 /* If the task is part of a group prevent parallel updates to group stats */
2578 ng = deref_curr_numa_group(p);
2579 if (ng) {
2580 group_lock = &ng->lock;
2581 spin_lock_irq(group_lock);
2582 }
2583
2584 /* Find the node with the highest number of faults */
2585 for_each_online_node(nid) {
2586 /* Keep track of the offsets in numa_faults array */
2587 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2588 unsigned long faults = 0, group_faults = 0;
2589 int priv;
2590
2591 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2592 long diff, f_diff, f_weight;
2593
2594 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2595 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2596 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2597 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2598
2599 /* Decay existing window, copy faults since last scan */
2600 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2601 fault_types[priv] += p->numa_faults[membuf_idx];
2602 p->numa_faults[membuf_idx] = 0;
2603
2604 /*
2605 * Normalize the faults_from, so all tasks in a group
2606 * count according to CPU use, instead of by the raw
2607 * number of faults. Tasks with little runtime have
2608 * little over-all impact on throughput, and thus their
2609 * faults are less important.
2610 */
2611 f_weight = div64_u64(runtime << 16, period + 1);
2612 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2613 (total_faults + 1);
2614 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2615 p->numa_faults[cpubuf_idx] = 0;
2616
2617 p->numa_faults[mem_idx] += diff;
2618 p->numa_faults[cpu_idx] += f_diff;
2619 faults += p->numa_faults[mem_idx];
2620 p->total_numa_faults += diff;
2621 if (ng) {
2622 /*
2623 * safe because we can only change our own group
2624 *
2625 * mem_idx represents the offset for a given
2626 * nid and priv in a specific region because it
2627 * is at the beginning of the numa_faults array.
2628 */
2629 ng->faults[mem_idx] += diff;
2630 ng->faults[cpu_idx] += f_diff;
2631 ng->total_faults += diff;
2632 group_faults += ng->faults[mem_idx];
2633 }
2634 }
2635
2636 if (!ng) {
2637 if (faults > max_faults) {
2638 max_faults = faults;
2639 max_nid = nid;
2640 }
2641 } else if (group_faults > max_faults) {
2642 max_faults = group_faults;
2643 max_nid = nid;
2644 }
2645 }
2646
2647 /* Cannot migrate task to CPU-less node */
2648 if (max_nid != NUMA_NO_NODE && !node_state(max_nid, N_CPU)) {
2649 int near_nid = max_nid;
2650 int distance, near_distance = INT_MAX;
2651
2652 for_each_node_state(nid, N_CPU) {
2653 distance = node_distance(max_nid, nid);
2654 if (distance < near_distance) {
2655 near_nid = nid;
2656 near_distance = distance;
2657 }
2658 }
2659 max_nid = near_nid;
2660 }
2661
2662 if (ng) {
2663 numa_group_count_active_nodes(ng);
2664 spin_unlock_irq(group_lock);
2665 max_nid = preferred_group_nid(p, max_nid);
2666 }
2667
2668 if (max_faults) {
2669 /* Set the new preferred node */
2670 if (max_nid != p->numa_preferred_nid)
2671 sched_setnuma(p, max_nid);
2672 }
2673
2674 update_task_scan_period(p, fault_types[0], fault_types[1]);
2675 }
2676
2677 static inline int get_numa_group(struct numa_group *grp)
2678 {
2679 return refcount_inc_not_zero(&grp->refcount);
2680 }
2681
2682 static inline void put_numa_group(struct numa_group *grp)
2683 {
2684 if (refcount_dec_and_test(&grp->refcount))
2685 kfree_rcu(grp, rcu);
2686 }
2687
2688 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2689 int *priv)
2690 {
2691 struct numa_group *grp, *my_grp;
2692 struct task_struct *tsk;
2693 bool join = false;
2694 int cpu = cpupid_to_cpu(cpupid);
2695 int i;
2696
2697 if (unlikely(!deref_curr_numa_group(p))) {
2698 unsigned int size = sizeof(struct numa_group) +
2699 NR_NUMA_HINT_FAULT_STATS *
2700 nr_node_ids * sizeof(unsigned long);
2701
2702 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2703 if (!grp)
2704 return;
2705
2706 refcount_set(&grp->refcount, 1);
2707 grp->active_nodes = 1;
2708 grp->max_faults_cpu = 0;
2709 spin_lock_init(&grp->lock);
2710 grp->gid = p->pid;
2711
2712 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2713 grp->faults[i] = p->numa_faults[i];
2714
2715 grp->total_faults = p->total_numa_faults;
2716
2717 grp->nr_tasks++;
2718 rcu_assign_pointer(p->numa_group, grp);
2719 }
2720
2721 rcu_read_lock();
2722 tsk = READ_ONCE(cpu_rq(cpu)->curr);
2723
2724 if (!cpupid_match_pid(tsk, cpupid))
2725 goto no_join;
2726
2727 grp = rcu_dereference(tsk->numa_group);
2728 if (!grp)
2729 goto no_join;
2730
2731 my_grp = deref_curr_numa_group(p);
2732 if (grp == my_grp)
2733 goto no_join;
2734
2735 /*
2736 * Only join the other group if its bigger; if we're the bigger group,
2737 * the other task will join us.
2738 */
2739 if (my_grp->nr_tasks > grp->nr_tasks)
2740 goto no_join;
2741
2742 /*
2743 * Tie-break on the grp address.
2744 */
2745 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2746 goto no_join;
2747
2748 /* Always join threads in the same process. */
2749 if (tsk->mm == current->mm)
2750 join = true;
2751
2752 /* Simple filter to avoid false positives due to PID collisions */
2753 if (flags & TNF_SHARED)
2754 join = true;
2755
2756 /* Update priv based on whether false sharing was detected */
2757 *priv = !join;
2758
2759 if (join && !get_numa_group(grp))
2760 goto no_join;
2761
2762 rcu_read_unlock();
2763
2764 if (!join)
2765 return;
2766
2767 WARN_ON_ONCE(irqs_disabled());
2768 double_lock_irq(&my_grp->lock, &grp->lock);
2769
2770 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2771 my_grp->faults[i] -= p->numa_faults[i];
2772 grp->faults[i] += p->numa_faults[i];
2773 }
2774 my_grp->total_faults -= p->total_numa_faults;
2775 grp->total_faults += p->total_numa_faults;
2776
2777 my_grp->nr_tasks--;
2778 grp->nr_tasks++;
2779
2780 spin_unlock(&my_grp->lock);
2781 spin_unlock_irq(&grp->lock);
2782
2783 rcu_assign_pointer(p->numa_group, grp);
2784
2785 put_numa_group(my_grp);
2786 return;
2787
2788 no_join:
2789 rcu_read_unlock();
2790 return;
2791 }
2792
2793 /*
2794 * Get rid of NUMA statistics associated with a task (either current or dead).
2795 * If @final is set, the task is dead and has reached refcount zero, so we can
2796 * safely free all relevant data structures. Otherwise, there might be
2797 * concurrent reads from places like load balancing and procfs, and we should
2798 * reset the data back to default state without freeing ->numa_faults.
2799 */
2800 void task_numa_free(struct task_struct *p, bool final)
2801 {
2802 /* safe: p either is current or is being freed by current */
2803 struct numa_group *grp = rcu_dereference_raw(p->numa_group);
2804 unsigned long *numa_faults = p->numa_faults;
2805 unsigned long flags;
2806 int i;
2807
2808 if (!numa_faults)
2809 return;
2810
2811 if (grp) {
2812 spin_lock_irqsave(&grp->lock, flags);
2813 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2814 grp->faults[i] -= p->numa_faults[i];
2815 grp->total_faults -= p->total_numa_faults;
2816
2817 grp->nr_tasks--;
2818 spin_unlock_irqrestore(&grp->lock, flags);
2819 RCU_INIT_POINTER(p->numa_group, NULL);
2820 put_numa_group(grp);
2821 }
2822
2823 if (final) {
2824 p->numa_faults = NULL;
2825 kfree(numa_faults);
2826 } else {
2827 p->total_numa_faults = 0;
2828 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2829 numa_faults[i] = 0;
2830 }
2831 }
2832
2833 /*
2834 * Got a PROT_NONE fault for a page on @node.
2835 */
2836 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2837 {
2838 struct task_struct *p = current;
2839 bool migrated = flags & TNF_MIGRATED;
2840 int cpu_node = task_node(current);
2841 int local = !!(flags & TNF_FAULT_LOCAL);
2842 struct numa_group *ng;
2843 int priv;
2844
2845 if (!static_branch_likely(&sched_numa_balancing))
2846 return;
2847
2848 /* for example, ksmd faulting in a user's mm */
2849 if (!p->mm)
2850 return;
2851
2852 /*
2853 * NUMA faults statistics are unnecessary for the slow memory
2854 * node for memory tiering mode.
2855 */
2856 if (!node_is_toptier(mem_node) &&
2857 (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ||
2858 !cpupid_valid(last_cpupid)))
2859 return;
2860
2861 /* Allocate buffer to track faults on a per-node basis */
2862 if (unlikely(!p->numa_faults)) {
2863 int size = sizeof(*p->numa_faults) *
2864 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2865
2866 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2867 if (!p->numa_faults)
2868 return;
2869
2870 p->total_numa_faults = 0;
2871 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2872 }
2873
2874 /*
2875 * First accesses are treated as private, otherwise consider accesses
2876 * to be private if the accessing pid has not changed
2877 */
2878 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2879 priv = 1;
2880 } else {
2881 priv = cpupid_match_pid(p, last_cpupid);
2882 if (!priv && !(flags & TNF_NO_GROUP))
2883 task_numa_group(p, last_cpupid, flags, &priv);
2884 }
2885
2886 /*
2887 * If a workload spans multiple NUMA nodes, a shared fault that
2888 * occurs wholly within the set of nodes that the workload is
2889 * actively using should be counted as local. This allows the
2890 * scan rate to slow down when a workload has settled down.
2891 */
2892 ng = deref_curr_numa_group(p);
2893 if (!priv && !local && ng && ng->active_nodes > 1 &&
2894 numa_is_active_node(cpu_node, ng) &&
2895 numa_is_active_node(mem_node, ng))
2896 local = 1;
2897
2898 /*
2899 * Retry to migrate task to preferred node periodically, in case it
2900 * previously failed, or the scheduler moved us.
2901 */
2902 if (time_after(jiffies, p->numa_migrate_retry)) {
2903 task_numa_placement(p);
2904 numa_migrate_preferred(p);
2905 }
2906
2907 if (migrated)
2908 p->numa_pages_migrated += pages;
2909 if (flags & TNF_MIGRATE_FAIL)
2910 p->numa_faults_locality[2] += pages;
2911
2912 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2913 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2914 p->numa_faults_locality[local] += pages;
2915 }
2916
2917 static void reset_ptenuma_scan(struct task_struct *p)
2918 {
2919 /*
2920 * We only did a read acquisition of the mmap sem, so
2921 * p->mm->numa_scan_seq is written to without exclusive access
2922 * and the update is not guaranteed to be atomic. That's not
2923 * much of an issue though, since this is just used for
2924 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2925 * expensive, to avoid any form of compiler optimizations:
2926 */
2927 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2928 p->mm->numa_scan_offset = 0;
2929 }
2930
2931 static bool vma_is_accessed(struct vm_area_struct *vma)
2932 {
2933 unsigned long pids;
2934 /*
2935 * Allow unconditional access first two times, so that all the (pages)
2936 * of VMAs get prot_none fault introduced irrespective of accesses.
2937 * This is also done to avoid any side effect of task scanning
2938 * amplifying the unfairness of disjoint set of VMAs' access.
2939 */
2940 if (READ_ONCE(current->mm->numa_scan_seq) < 2)
2941 return true;
2942
2943 pids = vma->numab_state->access_pids[0] | vma->numab_state->access_pids[1];
2944 return test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids);
2945 }
2946
2947 #define VMA_PID_RESET_PERIOD (4 * sysctl_numa_balancing_scan_delay)
2948
2949 /*
2950 * The expensive part of numa migration is done from task_work context.
2951 * Triggered from task_tick_numa().
2952 */
2953 static void task_numa_work(struct callback_head *work)
2954 {
2955 unsigned long migrate, next_scan, now = jiffies;
2956 struct task_struct *p = current;
2957 struct mm_struct *mm = p->mm;
2958 u64 runtime = p->se.sum_exec_runtime;
2959 struct vm_area_struct *vma;
2960 unsigned long start, end;
2961 unsigned long nr_pte_updates = 0;
2962 long pages, virtpages;
2963 struct vma_iterator vmi;
2964
2965 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2966
2967 work->next = work;
2968 /*
2969 * Who cares about NUMA placement when they're dying.
2970 *
2971 * NOTE: make sure not to dereference p->mm before this check,
2972 * exit_task_work() happens _after_ exit_mm() so we could be called
2973 * without p->mm even though we still had it when we enqueued this
2974 * work.
2975 */
2976 if (p->flags & PF_EXITING)
2977 return;
2978
2979 if (!mm->numa_next_scan) {
2980 mm->numa_next_scan = now +
2981 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2982 }
2983
2984 /*
2985 * Enforce maximal scan/migration frequency..
2986 */
2987 migrate = mm->numa_next_scan;
2988 if (time_before(now, migrate))
2989 return;
2990
2991 if (p->numa_scan_period == 0) {
2992 p->numa_scan_period_max = task_scan_max(p);
2993 p->numa_scan_period = task_scan_start(p);
2994 }
2995
2996 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2997 if (!try_cmpxchg(&mm->numa_next_scan, &migrate, next_scan))
2998 return;
2999
3000 /*
3001 * Delay this task enough that another task of this mm will likely win
3002 * the next time around.
3003 */
3004 p->node_stamp += 2 * TICK_NSEC;
3005
3006 start = mm->numa_scan_offset;
3007 pages = sysctl_numa_balancing_scan_size;
3008 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
3009 virtpages = pages * 8; /* Scan up to this much virtual space */
3010 if (!pages)
3011 return;
3012
3013
3014 if (!mmap_read_trylock(mm))
3015 return;
3016 vma_iter_init(&vmi, mm, start);
3017 vma = vma_next(&vmi);
3018 if (!vma) {
3019 reset_ptenuma_scan(p);
3020 start = 0;
3021 vma_iter_set(&vmi, start);
3022 vma = vma_next(&vmi);
3023 }
3024
3025 do {
3026 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
3027 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
3028 continue;
3029 }
3030
3031 /*
3032 * Shared library pages mapped by multiple processes are not
3033 * migrated as it is expected they are cache replicated. Avoid
3034 * hinting faults in read-only file-backed mappings or the vdso
3035 * as migrating the pages will be of marginal benefit.
3036 */
3037 if (!vma->vm_mm ||
3038 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
3039 continue;
3040
3041 /*
3042 * Skip inaccessible VMAs to avoid any confusion between
3043 * PROT_NONE and NUMA hinting ptes
3044 */
3045 if (!vma_is_accessible(vma))
3046 continue;
3047
3048 /* Initialise new per-VMA NUMAB state. */
3049 if (!vma->numab_state) {
3050 vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
3051 GFP_KERNEL);
3052 if (!vma->numab_state)
3053 continue;
3054
3055 vma->numab_state->next_scan = now +
3056 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
3057
3058 /* Reset happens after 4 times scan delay of scan start */
3059 vma->numab_state->next_pid_reset = vma->numab_state->next_scan +
3060 msecs_to_jiffies(VMA_PID_RESET_PERIOD);
3061 }
3062
3063 /*
3064 * Scanning the VMA's of short lived tasks add more overhead. So
3065 * delay the scan for new VMAs.
3066 */
3067 if (mm->numa_scan_seq && time_before(jiffies,
3068 vma->numab_state->next_scan))
3069 continue;
3070
3071 /* Do not scan the VMA if task has not accessed */
3072 if (!vma_is_accessed(vma))
3073 continue;
3074
3075 /*
3076 * RESET access PIDs regularly for old VMAs. Resetting after checking
3077 * vma for recent access to avoid clearing PID info before access..
3078 */
3079 if (mm->numa_scan_seq &&
3080 time_after(jiffies, vma->numab_state->next_pid_reset)) {
3081 vma->numab_state->next_pid_reset = vma->numab_state->next_pid_reset +
3082 msecs_to_jiffies(VMA_PID_RESET_PERIOD);
3083 vma->numab_state->access_pids[0] = READ_ONCE(vma->numab_state->access_pids[1]);
3084 vma->numab_state->access_pids[1] = 0;
3085 }
3086
3087 do {
3088 start = max(start, vma->vm_start);
3089 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
3090 end = min(end, vma->vm_end);
3091 nr_pte_updates = change_prot_numa(vma, start, end);
3092
3093 /*
3094 * Try to scan sysctl_numa_balancing_size worth of
3095 * hpages that have at least one present PTE that
3096 * is not already pte-numa. If the VMA contains
3097 * areas that are unused or already full of prot_numa
3098 * PTEs, scan up to virtpages, to skip through those
3099 * areas faster.
3100 */
3101 if (nr_pte_updates)
3102 pages -= (end - start) >> PAGE_SHIFT;
3103 virtpages -= (end - start) >> PAGE_SHIFT;
3104
3105 start = end;
3106 if (pages <= 0 || virtpages <= 0)
3107 goto out;
3108
3109 cond_resched();
3110 } while (end != vma->vm_end);
3111 } for_each_vma(vmi, vma);
3112
3113 out:
3114 /*
3115 * It is possible to reach the end of the VMA list but the last few
3116 * VMAs are not guaranteed to the vma_migratable. If they are not, we
3117 * would find the !migratable VMA on the next scan but not reset the
3118 * scanner to the start so check it now.
3119 */
3120 if (vma)
3121 mm->numa_scan_offset = start;
3122 else
3123 reset_ptenuma_scan(p);
3124 mmap_read_unlock(mm);
3125
3126 /*
3127 * Make sure tasks use at least 32x as much time to run other code
3128 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
3129 * Usually update_task_scan_period slows down scanning enough; on an
3130 * overloaded system we need to limit overhead on a per task basis.
3131 */
3132 if (unlikely(p->se.sum_exec_runtime != runtime)) {
3133 u64 diff = p->se.sum_exec_runtime - runtime;
3134 p->node_stamp += 32 * diff;
3135 }
3136 }
3137
3138 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
3139 {
3140 int mm_users = 0;
3141 struct mm_struct *mm = p->mm;
3142
3143 if (mm) {
3144 mm_users = atomic_read(&mm->mm_users);
3145 if (mm_users == 1) {
3146 mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
3147 mm->numa_scan_seq = 0;
3148 }
3149 }
3150 p->node_stamp = 0;
3151 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0;
3152 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
3153 p->numa_migrate_retry = 0;
3154 /* Protect against double add, see task_tick_numa and task_numa_work */
3155 p->numa_work.next = &p->numa_work;
3156 p->numa_faults = NULL;
3157 p->numa_pages_migrated = 0;
3158 p->total_numa_faults = 0;
3159 RCU_INIT_POINTER(p->numa_group, NULL);
3160 p->last_task_numa_placement = 0;
3161 p->last_sum_exec_runtime = 0;
3162
3163 init_task_work(&p->numa_work, task_numa_work);
3164
3165 /* New address space, reset the preferred nid */
3166 if (!(clone_flags & CLONE_VM)) {
3167 p->numa_preferred_nid = NUMA_NO_NODE;
3168 return;
3169 }
3170
3171 /*
3172 * New thread, keep existing numa_preferred_nid which should be copied
3173 * already by arch_dup_task_struct but stagger when scans start.
3174 */
3175 if (mm) {
3176 unsigned int delay;
3177
3178 delay = min_t(unsigned int, task_scan_max(current),
3179 current->numa_scan_period * mm_users * NSEC_PER_MSEC);
3180 delay += 2 * TICK_NSEC;
3181 p->node_stamp = delay;
3182 }
3183 }
3184
3185 /*
3186 * Drive the periodic memory faults..
3187 */
3188 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3189 {
3190 struct callback_head *work = &curr->numa_work;
3191 u64 period, now;
3192
3193 /*
3194 * We don't care about NUMA placement if we don't have memory.
3195 */
3196 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
3197 return;
3198
3199 /*
3200 * Using runtime rather than walltime has the dual advantage that
3201 * we (mostly) drive the selection from busy threads and that the
3202 * task needs to have done some actual work before we bother with
3203 * NUMA placement.
3204 */
3205 now = curr->se.sum_exec_runtime;
3206 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
3207
3208 if (now > curr->node_stamp + period) {
3209 if (!curr->node_stamp)
3210 curr->numa_scan_period = task_scan_start(curr);
3211 curr->node_stamp += period;
3212
3213 if (!time_before(jiffies, curr->mm->numa_next_scan))
3214 task_work_add(curr, work, TWA_RESUME);
3215 }
3216 }
3217
3218 static void update_scan_period(struct task_struct *p, int new_cpu)
3219 {
3220 int src_nid = cpu_to_node(task_cpu(p));
3221 int dst_nid = cpu_to_node(new_cpu);
3222
3223 if (!static_branch_likely(&sched_numa_balancing))
3224 return;
3225
3226 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
3227 return;
3228
3229 if (src_nid == dst_nid)
3230 return;
3231
3232 /*
3233 * Allow resets if faults have been trapped before one scan
3234 * has completed. This is most likely due to a new task that
3235 * is pulled cross-node due to wakeups or load balancing.
3236 */
3237 if (p->numa_scan_seq) {
3238 /*
3239 * Avoid scan adjustments if moving to the preferred
3240 * node or if the task was not previously running on
3241 * the preferred node.
3242 */
3243 if (dst_nid == p->numa_preferred_nid ||
3244 (p->numa_preferred_nid != NUMA_NO_NODE &&
3245 src_nid != p->numa_preferred_nid))
3246 return;
3247 }
3248
3249 p->numa_scan_period = task_scan_start(p);
3250 }
3251
3252 #else
3253 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3254 {
3255 }
3256
3257 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
3258 {
3259 }
3260
3261 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
3262 {
3263 }
3264
3265 static inline void update_scan_period(struct task_struct *p, int new_cpu)
3266 {
3267 }
3268
3269 #endif /* CONFIG_NUMA_BALANCING */
3270
3271 static void
3272 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3273 {
3274 update_load_add(&cfs_rq->load, se->load.weight);
3275 #ifdef CONFIG_SMP
3276 if (entity_is_task(se)) {
3277 struct rq *rq = rq_of(cfs_rq);
3278
3279 account_numa_enqueue(rq, task_of(se));
3280 list_add(&se->group_node, &rq->cfs_tasks);
3281 }
3282 #endif
3283 cfs_rq->nr_running++;
3284 if (se_is_idle(se))
3285 cfs_rq->idle_nr_running++;
3286 }
3287
3288 static void
3289 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3290 {
3291 update_load_sub(&cfs_rq->load, se->load.weight);
3292 #ifdef CONFIG_SMP
3293 if (entity_is_task(se)) {
3294 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3295 list_del_init(&se->group_node);
3296 }
3297 #endif
3298 cfs_rq->nr_running--;
3299 if (se_is_idle(se))
3300 cfs_rq->idle_nr_running--;
3301 }
3302
3303 /*
3304 * Signed add and clamp on underflow.
3305 *
3306 * Explicitly do a load-store to ensure the intermediate value never hits
3307 * memory. This allows lockless observations without ever seeing the negative
3308 * values.
3309 */
3310 #define add_positive(_ptr, _val) do { \
3311 typeof(_ptr) ptr = (_ptr); \
3312 typeof(_val) val = (_val); \
3313 typeof(*ptr) res, var = READ_ONCE(*ptr); \
3314 \
3315 res = var + val; \
3316 \
3317 if (val < 0 && res > var) \
3318 res = 0; \
3319 \
3320 WRITE_ONCE(*ptr, res); \
3321 } while (0)
3322
3323 /*
3324 * Unsigned subtract and clamp on underflow.
3325 *
3326 * Explicitly do a load-store to ensure the intermediate value never hits
3327 * memory. This allows lockless observations without ever seeing the negative
3328 * values.
3329 */
3330 #define sub_positive(_ptr, _val) do { \
3331 typeof(_ptr) ptr = (_ptr); \
3332 typeof(*ptr) val = (_val); \
3333 typeof(*ptr) res, var = READ_ONCE(*ptr); \
3334 res = var - val; \
3335 if (res > var) \
3336 res = 0; \
3337 WRITE_ONCE(*ptr, res); \
3338 } while (0)
3339
3340 /*
3341 * Remove and clamp on negative, from a local variable.
3342 *
3343 * A variant of sub_positive(), which does not use explicit load-store
3344 * and is thus optimized for local variable updates.
3345 */
3346 #define lsub_positive(_ptr, _val) do { \
3347 typeof(_ptr) ptr = (_ptr); \
3348 *ptr -= min_t(typeof(*ptr), *ptr, _val); \
3349 } while (0)
3350
3351 #ifdef CONFIG_SMP
3352 static inline void
3353 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3354 {
3355 cfs_rq->avg.load_avg += se->avg.load_avg;
3356 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3357 }
3358
3359 static inline void
3360 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3361 {
3362 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3363 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3364 /* See update_cfs_rq_load_avg() */
3365 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
3366 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3367 }
3368 #else
3369 static inline void
3370 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3371 static inline void
3372 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3373 #endif
3374
3375 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
3376 unsigned long weight)
3377 {
3378 if (se->on_rq) {
3379 /* commit outstanding execution time */
3380 if (cfs_rq->curr == se)
3381 update_curr(cfs_rq);
3382 update_load_sub(&cfs_rq->load, se->load.weight);
3383 }
3384 dequeue_load_avg(cfs_rq, se);
3385
3386 update_load_set(&se->load, weight);
3387
3388 #ifdef CONFIG_SMP
3389 do {
3390 u32 divider = get_pelt_divider(&se->avg);
3391
3392 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3393 } while (0);
3394 #endif
3395
3396 enqueue_load_avg(cfs_rq, se);
3397 if (se->on_rq)
3398 update_load_add(&cfs_rq->load, se->load.weight);
3399
3400 }
3401
3402 void reweight_task(struct task_struct *p, int prio)
3403 {
3404 struct sched_entity *se = &p->se;
3405 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3406 struct load_weight *load = &se->load;
3407 unsigned long weight = scale_load(sched_prio_to_weight[prio]);
3408
3409 reweight_entity(cfs_rq, se, weight);
3410 load->inv_weight = sched_prio_to_wmult[prio];
3411 }
3412
3413 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3414
3415 #ifdef CONFIG_FAIR_GROUP_SCHED
3416 #ifdef CONFIG_SMP
3417 /*
3418 * All this does is approximate the hierarchical proportion which includes that
3419 * global sum we all love to hate.
3420 *
3421 * That is, the weight of a group entity, is the proportional share of the
3422 * group weight based on the group runqueue weights. That is:
3423 *
3424 * tg->weight * grq->load.weight
3425 * ge->load.weight = ----------------------------- (1)
3426 * \Sum grq->load.weight
3427 *
3428 * Now, because computing that sum is prohibitively expensive to compute (been
3429 * there, done that) we approximate it with this average stuff. The average
3430 * moves slower and therefore the approximation is cheaper and more stable.
3431 *
3432 * So instead of the above, we substitute:
3433 *
3434 * grq->load.weight -> grq->avg.load_avg (2)
3435 *
3436 * which yields the following:
3437 *
3438 * tg->weight * grq->avg.load_avg
3439 * ge->load.weight = ------------------------------ (3)
3440 * tg->load_avg
3441 *
3442 * Where: tg->load_avg ~= \Sum grq->avg.load_avg
3443 *
3444 * That is shares_avg, and it is right (given the approximation (2)).
3445 *
3446 * The problem with it is that because the average is slow -- it was designed
3447 * to be exactly that of course -- this leads to transients in boundary
3448 * conditions. In specific, the case where the group was idle and we start the
3449 * one task. It takes time for our CPU's grq->avg.load_avg to build up,
3450 * yielding bad latency etc..
3451 *
3452 * Now, in that special case (1) reduces to:
3453 *
3454 * tg->weight * grq->load.weight
3455 * ge->load.weight = ----------------------------- = tg->weight (4)
3456 * grp->load.weight
3457 *
3458 * That is, the sum collapses because all other CPUs are idle; the UP scenario.
3459 *
3460 * So what we do is modify our approximation (3) to approach (4) in the (near)
3461 * UP case, like:
3462 *
3463 * ge->load.weight =
3464 *
3465 * tg->weight * grq->load.weight
3466 * --------------------------------------------------- (5)
3467 * tg->load_avg - grq->avg.load_avg + grq->load.weight
3468 *
3469 * But because grq->load.weight can drop to 0, resulting in a divide by zero,
3470 * we need to use grq->avg.load_avg as its lower bound, which then gives:
3471 *
3472 *
3473 * tg->weight * grq->load.weight
3474 * ge->load.weight = ----------------------------- (6)
3475 * tg_load_avg'
3476 *
3477 * Where:
3478 *
3479 * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
3480 * max(grq->load.weight, grq->avg.load_avg)
3481 *
3482 * And that is shares_weight and is icky. In the (near) UP case it approaches
3483 * (4) while in the normal case it approaches (3). It consistently
3484 * overestimates the ge->load.weight and therefore:
3485 *
3486 * \Sum ge->load.weight >= tg->weight
3487 *
3488 * hence icky!
3489 */
3490 static long calc_group_shares(struct cfs_rq *cfs_rq)
3491 {
3492 long tg_weight, tg_shares, load, shares;
3493 struct task_group *tg = cfs_rq->tg;
3494
3495 tg_shares = READ_ONCE(tg->shares);
3496
3497 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
3498
3499 tg_weight = atomic_long_read(&tg->load_avg);
3500
3501 /* Ensure tg_weight >= load */
3502 tg_weight -= cfs_rq->tg_load_avg_contrib;
3503 tg_weight += load;
3504
3505 shares = (tg_shares * load);
3506 if (tg_weight)
3507 shares /= tg_weight;
3508
3509 /*
3510 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
3511 * of a group with small tg->shares value. It is a floor value which is
3512 * assigned as a minimum load.weight to the sched_entity representing
3513 * the group on a CPU.
3514 *
3515 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
3516 * on an 8-core system with 8 tasks each runnable on one CPU shares has
3517 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
3518 * case no task is runnable on a CPU MIN_SHARES=2 should be returned
3519 * instead of 0.
3520 */
3521 return clamp_t(long, shares, MIN_SHARES, tg_shares);
3522 }
3523 #endif /* CONFIG_SMP */
3524
3525 /*
3526 * Recomputes the group entity based on the current state of its group
3527 * runqueue.
3528 */
3529 static void update_cfs_group(struct sched_entity *se)
3530 {
3531 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3532 long shares;
3533
3534 if (!gcfs_rq)
3535 return;
3536
3537 if (throttled_hierarchy(gcfs_rq))
3538 return;
3539
3540 #ifndef CONFIG_SMP
3541 shares = READ_ONCE(gcfs_rq->tg->shares);
3542
3543 if (likely(se->load.weight == shares))
3544 return;
3545 #else
3546 shares = calc_group_shares(gcfs_rq);
3547 #endif
3548
3549 reweight_entity(cfs_rq_of(se), se, shares);
3550 }
3551
3552 #else /* CONFIG_FAIR_GROUP_SCHED */
3553 static inline void update_cfs_group(struct sched_entity *se)
3554 {
3555 }
3556 #endif /* CONFIG_FAIR_GROUP_SCHED */
3557
3558 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
3559 {
3560 struct rq *rq = rq_of(cfs_rq);
3561
3562 if (&rq->cfs == cfs_rq) {
3563 /*
3564 * There are a few boundary cases this might miss but it should
3565 * get called often enough that that should (hopefully) not be
3566 * a real problem.
3567 *
3568 * It will not get called when we go idle, because the idle
3569 * thread is a different class (!fair), nor will the utilization
3570 * number include things like RT tasks.
3571 *
3572 * As is, the util number is not freq-invariant (we'd have to
3573 * implement arch_scale_freq_capacity() for that).
3574 *
3575 * See cpu_util_cfs().
3576 */
3577 cpufreq_update_util(rq, flags);
3578 }
3579 }
3580
3581 #ifdef CONFIG_SMP
3582 static inline bool load_avg_is_decayed(struct sched_avg *sa)
3583 {
3584 if (sa->load_sum)
3585 return false;
3586
3587 if (sa->util_sum)
3588 return false;
3589
3590 if (sa->runnable_sum)
3591 return false;
3592
3593 /*
3594 * _avg must be null when _sum are null because _avg = _sum / divider
3595 * Make sure that rounding and/or propagation of PELT values never
3596 * break this.
3597 */
3598 SCHED_WARN_ON(sa->load_avg ||
3599 sa->util_avg ||
3600 sa->runnable_avg);
3601
3602 return true;
3603 }
3604
3605 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3606 {
3607 return u64_u32_load_copy(cfs_rq->avg.last_update_time,
3608 cfs_rq->last_update_time_copy);
3609 }
3610 #ifdef CONFIG_FAIR_GROUP_SCHED
3611 /*
3612 * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
3613 * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
3614 * bottom-up, we only have to test whether the cfs_rq before us on the list
3615 * is our child.
3616 * If cfs_rq is not on the list, test whether a child needs its to be added to
3617 * connect a branch to the tree * (see list_add_leaf_cfs_rq() for details).
3618 */
3619 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
3620 {
3621 struct cfs_rq *prev_cfs_rq;
3622 struct list_head *prev;
3623
3624 if (cfs_rq->on_list) {
3625 prev = cfs_rq->leaf_cfs_rq_list.prev;
3626 } else {
3627 struct rq *rq = rq_of(cfs_rq);
3628
3629 prev = rq->tmp_alone_branch;
3630 }
3631
3632 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
3633
3634 return (prev_cfs_rq->tg->parent == cfs_rq->tg);
3635 }
3636
3637 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
3638 {
3639 if (cfs_rq->load.weight)
3640 return false;
3641
3642 if (!load_avg_is_decayed(&cfs_rq->avg))
3643 return false;
3644
3645 if (child_cfs_rq_on_list(cfs_rq))
3646 return false;
3647
3648 return true;
3649 }
3650
3651 /**
3652 * update_tg_load_avg - update the tg's load avg
3653 * @cfs_rq: the cfs_rq whose avg changed
3654 *
3655 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3656 * However, because tg->load_avg is a global value there are performance
3657 * considerations.
3658 *
3659 * In order to avoid having to look at the other cfs_rq's, we use a
3660 * differential update where we store the last value we propagated. This in
3661 * turn allows skipping updates if the differential is 'small'.
3662 *
3663 * Updating tg's load_avg is necessary before update_cfs_share().
3664 */
3665 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
3666 {
3667 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
3668
3669 /*
3670 * No need to update load_avg for root_task_group as it is not used.
3671 */
3672 if (cfs_rq->tg == &root_task_group)
3673 return;
3674
3675 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
3676 atomic_long_add(delta, &cfs_rq->tg->load_avg);
3677 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
3678 }
3679 }
3680
3681 /*
3682 * Called within set_task_rq() right before setting a task's CPU. The
3683 * caller only guarantees p->pi_lock is held; no other assumptions,
3684 * including the state of rq->lock, should be made.
3685 */
3686 void set_task_rq_fair(struct sched_entity *se,
3687 struct cfs_rq *prev, struct cfs_rq *next)
3688 {
3689 u64 p_last_update_time;
3690 u64 n_last_update_time;
3691
3692 if (!sched_feat(ATTACH_AGE_LOAD))
3693 return;
3694
3695 /*
3696 * We are supposed to update the task to "current" time, then its up to
3697 * date and ready to go to new CPU/cfs_rq. But we have difficulty in
3698 * getting what current time is, so simply throw away the out-of-date
3699 * time. This will result in the wakee task is less decayed, but giving
3700 * the wakee more load sounds not bad.
3701 */
3702 if (!(se->avg.last_update_time && prev))
3703 return;
3704
3705 p_last_update_time = cfs_rq_last_update_time(prev);
3706 n_last_update_time = cfs_rq_last_update_time(next);
3707
3708 __update_load_avg_blocked_se(p_last_update_time, se);
3709 se->avg.last_update_time = n_last_update_time;
3710 }
3711
3712 /*
3713 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
3714 * propagate its contribution. The key to this propagation is the invariant
3715 * that for each group:
3716 *
3717 * ge->avg == grq->avg (1)
3718 *
3719 * _IFF_ we look at the pure running and runnable sums. Because they
3720 * represent the very same entity, just at different points in the hierarchy.
3721 *
3722 * Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
3723 * and simply copies the running/runnable sum over (but still wrong, because
3724 * the group entity and group rq do not have their PELT windows aligned).
3725 *
3726 * However, update_tg_cfs_load() is more complex. So we have:
3727 *
3728 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
3729 *
3730 * And since, like util, the runnable part should be directly transferable,
3731 * the following would _appear_ to be the straight forward approach:
3732 *
3733 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
3734 *
3735 * And per (1) we have:
3736 *
3737 * ge->avg.runnable_avg == grq->avg.runnable_avg
3738 *
3739 * Which gives:
3740 *
3741 * ge->load.weight * grq->avg.load_avg
3742 * ge->avg.load_avg = ----------------------------------- (4)
3743 * grq->load.weight
3744 *
3745 * Except that is wrong!
3746 *
3747 * Because while for entities historical weight is not important and we
3748 * really only care about our future and therefore can consider a pure
3749 * runnable sum, runqueues can NOT do this.
3750 *
3751 * We specifically want runqueues to have a load_avg that includes
3752 * historical weights. Those represent the blocked load, the load we expect
3753 * to (shortly) return to us. This only works by keeping the weights as
3754 * integral part of the sum. We therefore cannot decompose as per (3).
3755 *
3756 * Another reason this doesn't work is that runnable isn't a 0-sum entity.
3757 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
3758 * rq itself is runnable anywhere between 2/3 and 1 depending on how the
3759 * runnable section of these tasks overlap (or not). If they were to perfectly
3760 * align the rq as a whole would be runnable 2/3 of the time. If however we
3761 * always have at least 1 runnable task, the rq as a whole is always runnable.
3762 *
3763 * So we'll have to approximate.. :/
3764 *
3765 * Given the constraint:
3766 *
3767 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
3768 *
3769 * We can construct a rule that adds runnable to a rq by assuming minimal
3770 * overlap.
3771 *
3772 * On removal, we'll assume each task is equally runnable; which yields:
3773 *
3774 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
3775 *
3776 * XXX: only do this for the part of runnable > running ?
3777 *
3778 */
3779 static inline void
3780 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3781 {
3782 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
3783 u32 new_sum, divider;
3784
3785 /* Nothing to update */
3786 if (!delta_avg)
3787 return;
3788
3789 /*
3790 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3791 * See ___update_load_avg() for details.
3792 */
3793 divider = get_pelt_divider(&cfs_rq->avg);
3794
3795
3796 /* Set new sched_entity's utilization */
3797 se->avg.util_avg = gcfs_rq->avg.util_avg;
3798 new_sum = se->avg.util_avg * divider;
3799 delta_sum = (long)new_sum - (long)se->avg.util_sum;
3800 se->avg.util_sum = new_sum;
3801
3802 /* Update parent cfs_rq utilization */
3803 add_positive(&cfs_rq->avg.util_avg, delta_avg);
3804 add_positive(&cfs_rq->avg.util_sum, delta_sum);
3805
3806 /* See update_cfs_rq_load_avg() */
3807 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
3808 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
3809 }
3810
3811 static inline void
3812 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3813 {
3814 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
3815 u32 new_sum, divider;
3816
3817 /* Nothing to update */
3818 if (!delta_avg)
3819 return;
3820
3821 /*
3822 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3823 * See ___update_load_avg() for details.
3824 */
3825 divider = get_pelt_divider(&cfs_rq->avg);
3826
3827 /* Set new sched_entity's runnable */
3828 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
3829 new_sum = se->avg.runnable_avg * divider;
3830 delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
3831 se->avg.runnable_sum = new_sum;
3832
3833 /* Update parent cfs_rq runnable */
3834 add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
3835 add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
3836 /* See update_cfs_rq_load_avg() */
3837 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
3838 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
3839 }
3840
3841 static inline void
3842 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
3843 {
3844 long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
3845 unsigned long load_avg;
3846 u64 load_sum = 0;
3847 s64 delta_sum;
3848 u32 divider;
3849
3850 if (!runnable_sum)
3851 return;
3852
3853 gcfs_rq->prop_runnable_sum = 0;
3854
3855 /*
3856 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3857 * See ___update_load_avg() for details.
3858 */
3859 divider = get_pelt_divider(&cfs_rq->avg);
3860
3861 if (runnable_sum >= 0) {
3862 /*
3863 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
3864 * the CPU is saturated running == runnable.
3865 */
3866 runnable_sum += se->avg.load_sum;
3867 runnable_sum = min_t(long, runnable_sum, divider);
3868 } else {
3869 /*
3870 * Estimate the new unweighted runnable_sum of the gcfs_rq by
3871 * assuming all tasks are equally runnable.
3872 */
3873 if (scale_load_down(gcfs_rq->load.weight)) {
3874 load_sum = div_u64(gcfs_rq->avg.load_sum,
3875 scale_load_down(gcfs_rq->load.weight));
3876 }
3877
3878 /* But make sure to not inflate se's runnable */
3879 runnable_sum = min(se->avg.load_sum, load_sum);
3880 }
3881
3882 /*
3883 * runnable_sum can't be lower than running_sum
3884 * Rescale running sum to be in the same range as runnable sum
3885 * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT]
3886 * runnable_sum is in [0 : LOAD_AVG_MAX]
3887 */
3888 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
3889 runnable_sum = max(runnable_sum, running_sum);
3890
3891 load_sum = se_weight(se) * runnable_sum;
3892 load_avg = div_u64(load_sum, divider);
3893
3894 delta_avg = load_avg - se->avg.load_avg;
3895 if (!delta_avg)
3896 return;
3897
3898 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
3899
3900 se->avg.load_sum = runnable_sum;
3901 se->avg.load_avg = load_avg;
3902 add_positive(&cfs_rq->avg.load_avg, delta_avg);
3903 add_positive(&cfs_rq->avg.load_sum, delta_sum);
3904 /* See update_cfs_rq_load_avg() */
3905 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
3906 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3907 }
3908
3909 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
3910 {
3911 cfs_rq->propagate = 1;
3912 cfs_rq->prop_runnable_sum += runnable_sum;
3913 }
3914
3915 /* Update task and its cfs_rq load average */
3916 static inline int propagate_entity_load_avg(struct sched_entity *se)
3917 {
3918 struct cfs_rq *cfs_rq, *gcfs_rq;
3919
3920 if (entity_is_task(se))
3921 return 0;
3922
3923 gcfs_rq = group_cfs_rq(se);
3924 if (!gcfs_rq->propagate)
3925 return 0;
3926
3927 gcfs_rq->propagate = 0;
3928
3929 cfs_rq = cfs_rq_of(se);
3930
3931 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
3932
3933 update_tg_cfs_util(cfs_rq, se, gcfs_rq);
3934 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
3935 update_tg_cfs_load(cfs_rq, se, gcfs_rq);
3936
3937 trace_pelt_cfs_tp(cfs_rq);
3938 trace_pelt_se_tp(se);
3939
3940 return 1;
3941 }
3942
3943 /*
3944 * Check if we need to update the load and the utilization of a blocked
3945 * group_entity:
3946 */
3947 static inline bool skip_blocked_update(struct sched_entity *se)
3948 {
3949 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3950
3951 /*
3952 * If sched_entity still have not zero load or utilization, we have to
3953 * decay it:
3954 */
3955 if (se->avg.load_avg || se->avg.util_avg)
3956 return false;
3957
3958 /*
3959 * If there is a pending propagation, we have to update the load and
3960 * the utilization of the sched_entity:
3961 */
3962 if (gcfs_rq->propagate)
3963 return false;
3964
3965 /*
3966 * Otherwise, the load and the utilization of the sched_entity is
3967 * already zero and there is no pending propagation, so it will be a
3968 * waste of time to try to decay it:
3969 */
3970 return true;
3971 }
3972
3973 #else /* CONFIG_FAIR_GROUP_SCHED */
3974
3975 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
3976
3977 static inline int propagate_entity_load_avg(struct sched_entity *se)
3978 {
3979 return 0;
3980 }
3981
3982 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
3983
3984 #endif /* CONFIG_FAIR_GROUP_SCHED */
3985
3986 #ifdef CONFIG_NO_HZ_COMMON
3987 static inline void migrate_se_pelt_lag(struct sched_entity *se)
3988 {
3989 u64 throttled = 0, now, lut;
3990 struct cfs_rq *cfs_rq;
3991 struct rq *rq;
3992 bool is_idle;
3993
3994 if (load_avg_is_decayed(&se->avg))
3995 return;
3996
3997 cfs_rq = cfs_rq_of(se);
3998 rq = rq_of(cfs_rq);
3999
4000 rcu_read_lock();
4001 is_idle = is_idle_task(rcu_dereference(rq->curr));
4002 rcu_read_unlock();
4003
4004 /*
4005 * The lag estimation comes with a cost we don't want to pay all the
4006 * time. Hence, limiting to the case where the source CPU is idle and
4007 * we know we are at the greatest risk to have an outdated clock.
4008 */
4009 if (!is_idle)
4010 return;
4011
4012 /*
4013 * Estimated "now" is: last_update_time + cfs_idle_lag + rq_idle_lag, where:
4014 *
4015 * last_update_time (the cfs_rq's last_update_time)
4016 * = cfs_rq_clock_pelt()@cfs_rq_idle
4017 * = rq_clock_pelt()@cfs_rq_idle
4018 * - cfs->throttled_clock_pelt_time@cfs_rq_idle
4019 *
4020 * cfs_idle_lag (delta between rq's update and cfs_rq's update)
4021 * = rq_clock_pelt()@rq_idle - rq_clock_pelt()@cfs_rq_idle
4022 *
4023 * rq_idle_lag (delta between now and rq's update)
4024 * = sched_clock_cpu() - rq_clock()@rq_idle
4025 *
4026 * We can then write:
4027 *
4028 * now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time +
4029 * sched_clock_cpu() - rq_clock()@rq_idle
4030 * Where:
4031 * rq_clock_pelt()@rq_idle is rq->clock_pelt_idle
4032 * rq_clock()@rq_idle is rq->clock_idle
4033 * cfs->throttled_clock_pelt_time@cfs_rq_idle
4034 * is cfs_rq->throttled_pelt_idle
4035 */
4036
4037 #ifdef CONFIG_CFS_BANDWIDTH
4038 throttled = u64_u32_load(cfs_rq->throttled_pelt_idle);
4039 /* The clock has been stopped for throttling */
4040 if (throttled == U64_MAX)
4041 return;
4042 #endif
4043 now = u64_u32_load(rq->clock_pelt_idle);
4044 /*
4045 * Paired with _update_idle_rq_clock_pelt(). It ensures at the worst case
4046 * is observed the old clock_pelt_idle value and the new clock_idle,
4047 * which lead to an underestimation. The opposite would lead to an
4048 * overestimation.
4049 */
4050 smp_rmb();
4051 lut = cfs_rq_last_update_time(cfs_rq);
4052
4053 now -= throttled;
4054 if (now < lut)
4055 /*
4056 * cfs_rq->avg.last_update_time is more recent than our
4057 * estimation, let's use it.
4058 */
4059 now = lut;
4060 else
4061 now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);
4062
4063 __update_load_avg_blocked_se(now, se);
4064 }
4065 #else
4066 static void migrate_se_pelt_lag(struct sched_entity *se) {}
4067 #endif
4068
4069 /**
4070 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
4071 * @now: current time, as per cfs_rq_clock_pelt()
4072 * @cfs_rq: cfs_rq to update
4073 *
4074 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
4075 * avg. The immediate corollary is that all (fair) tasks must be attached.
4076 *
4077 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
4078 *
4079 * Return: true if the load decayed or we removed load.
4080 *
4081 * Since both these conditions indicate a changed cfs_rq->avg.load we should
4082 * call update_tg_load_avg() when this function returns true.
4083 */
4084 static inline int
4085 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
4086 {
4087 unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
4088 struct sched_avg *sa = &cfs_rq->avg;
4089 int decayed = 0;
4090
4091 if (cfs_rq->removed.nr) {
4092 unsigned long r;
4093 u32 divider = get_pelt_divider(&cfs_rq->avg);
4094
4095 raw_spin_lock(&cfs_rq->removed.lock);
4096 swap(cfs_rq->removed.util_avg, removed_util);
4097 swap(cfs_rq->removed.load_avg, removed_load);
4098 swap(cfs_rq->removed.runnable_avg, removed_runnable);
4099 cfs_rq->removed.nr = 0;
4100 raw_spin_unlock(&cfs_rq->removed.lock);
4101
4102 r = removed_load;
4103 sub_positive(&sa->load_avg, r);
4104 sub_positive(&sa->load_sum, r * divider);
4105 /* See sa->util_sum below */
4106 sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
4107
4108 r = removed_util;
4109 sub_positive(&sa->util_avg, r);
4110 sub_positive(&sa->util_sum, r * divider);
4111 /*
4112 * Because of rounding, se->util_sum might ends up being +1 more than
4113 * cfs->util_sum. Although this is not a problem by itself, detaching
4114 * a lot of tasks with the rounding problem between 2 updates of
4115 * util_avg (~1ms) can make cfs->util_sum becoming null whereas
4116 * cfs_util_avg is not.
4117 * Check that util_sum is still above its lower bound for the new
4118 * util_avg. Given that period_contrib might have moved since the last
4119 * sync, we are only sure that util_sum must be above or equal to
4120 * util_avg * minimum possible divider
4121 */
4122 sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
4123
4124 r = removed_runnable;
4125 sub_positive(&sa->runnable_avg, r);
4126 sub_positive(&sa->runnable_sum, r * divider);
4127 /* See sa->util_sum above */
4128 sa->runnable_sum = max_t(u32, sa->runnable_sum,
4129 sa->runnable_avg * PELT_MIN_DIVIDER);
4130
4131 /*
4132 * removed_runnable is the unweighted version of removed_load so we
4133 * can use it to estimate removed_load_sum.
4134 */
4135 add_tg_cfs_propagate(cfs_rq,
4136 -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
4137
4138 decayed = 1;
4139 }
4140
4141 decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
4142 u64_u32_store_copy(sa->last_update_time,
4143 cfs_rq->last_update_time_copy,
4144 sa->last_update_time);
4145 return decayed;
4146 }
4147
4148 /**
4149 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
4150 * @cfs_rq: cfs_rq to attach to
4151 * @se: sched_entity to attach
4152 *
4153 * Must call update_cfs_rq_load_avg() before this, since we rely on
4154 * cfs_rq->avg.last_update_time being current.
4155 */
4156 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4157 {
4158 /*
4159 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4160 * See ___update_load_avg() for details.
4161 */
4162 u32 divider = get_pelt_divider(&cfs_rq->avg);
4163
4164 /*
4165 * When we attach the @se to the @cfs_rq, we must align the decay
4166 * window because without that, really weird and wonderful things can
4167 * happen.
4168 *
4169 * XXX illustrate
4170 */
4171 se->avg.last_update_time = cfs_rq->avg.last_update_time;
4172 se->avg.period_contrib = cfs_rq->avg.period_contrib;
4173
4174 /*
4175 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
4176 * period_contrib. This isn't strictly correct, but since we're
4177 * entirely outside of the PELT hierarchy, nobody cares if we truncate
4178 * _sum a little.
4179 */
4180 se->avg.util_sum = se->avg.util_avg * divider;
4181
4182 se->avg.runnable_sum = se->avg.runnable_avg * divider;
4183
4184 se->avg.load_sum = se->avg.load_avg * divider;
4185 if (se_weight(se) < se->avg.load_sum)
4186 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
4187 else
4188 se->avg.load_sum = 1;
4189
4190 enqueue_load_avg(cfs_rq, se);
4191 cfs_rq->avg.util_avg += se->avg.util_avg;
4192 cfs_rq->avg.util_sum += se->avg.util_sum;
4193 cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
4194 cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
4195
4196 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
4197
4198 cfs_rq_util_change(cfs_rq, 0);
4199
4200 trace_pelt_cfs_tp(cfs_rq);
4201 }
4202
4203 /**
4204 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
4205 * @cfs_rq: cfs_rq to detach from
4206 * @se: sched_entity to detach
4207 *
4208 * Must call update_cfs_rq_load_avg() before this, since we rely on
4209 * cfs_rq->avg.last_update_time being current.
4210 */
4211 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4212 {
4213 dequeue_load_avg(cfs_rq, se);
4214 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4215 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
4216 /* See update_cfs_rq_load_avg() */
4217 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
4218 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
4219
4220 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
4221 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
4222 /* See update_cfs_rq_load_avg() */
4223 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
4224 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
4225
4226 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
4227
4228 cfs_rq_util_change(cfs_rq, 0);
4229
4230 trace_pelt_cfs_tp(cfs_rq);
4231 }
4232
4233 /*
4234 * Optional action to be done while updating the load average
4235 */
4236 #define UPDATE_TG 0x1
4237 #define SKIP_AGE_LOAD 0x2
4238 #define DO_ATTACH 0x4
4239 #define DO_DETACH 0x8
4240
4241 /* Update task and its cfs_rq load average */
4242 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4243 {
4244 u64 now = cfs_rq_clock_pelt(cfs_rq);
4245 int decayed;
4246
4247 /*
4248 * Track task load average for carrying it to new CPU after migrated, and
4249 * track group sched_entity load average for task_h_load calc in migration
4250 */
4251 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
4252 __update_load_avg_se(now, cfs_rq, se);
4253
4254 decayed = update_cfs_rq_load_avg(now, cfs_rq);
4255 decayed |= propagate_entity_load_avg(se);
4256
4257 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
4258
4259 /*
4260 * DO_ATTACH means we're here from enqueue_entity().
4261 * !last_update_time means we've passed through
4262 * migrate_task_rq_fair() indicating we migrated.
4263 *
4264 * IOW we're enqueueing a task on a new CPU.
4265 */
4266 attach_entity_load_avg(cfs_rq, se);
4267 update_tg_load_avg(cfs_rq);
4268
4269 } else if (flags & DO_DETACH) {
4270 /*
4271 * DO_DETACH means we're here from dequeue_entity()
4272 * and we are migrating task out of the CPU.
4273 */
4274 detach_entity_load_avg(cfs_rq, se);
4275 update_tg_load_avg(cfs_rq);
4276 } else if (decayed) {
4277 cfs_rq_util_change(cfs_rq, 0);
4278
4279 if (flags & UPDATE_TG)
4280 update_tg_load_avg(cfs_rq);
4281 }
4282 }
4283
4284 /*
4285 * Synchronize entity load avg of dequeued entity without locking
4286 * the previous rq.
4287 */
4288 static void sync_entity_load_avg(struct sched_entity *se)
4289 {
4290 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4291 u64 last_update_time;
4292
4293 last_update_time = cfs_rq_last_update_time(cfs_rq);
4294 __update_load_avg_blocked_se(last_update_time, se);
4295 }
4296
4297 /*
4298 * Task first catches up with cfs_rq, and then subtract
4299 * itself from the cfs_rq (task must be off the queue now).
4300 */
4301 static void remove_entity_load_avg(struct sched_entity *se)
4302 {
4303 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4304 unsigned long flags;
4305
4306 /*
4307 * tasks cannot exit without having gone through wake_up_new_task() ->
4308 * enqueue_task_fair() which will have added things to the cfs_rq,
4309 * so we can remove unconditionally.
4310 */
4311
4312 sync_entity_load_avg(se);
4313
4314 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
4315 ++cfs_rq->removed.nr;
4316 cfs_rq->removed.util_avg += se->avg.util_avg;
4317 cfs_rq->removed.load_avg += se->avg.load_avg;
4318 cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
4319 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
4320 }
4321
4322 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
4323 {
4324 return cfs_rq->avg.runnable_avg;
4325 }
4326
4327 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
4328 {
4329 return cfs_rq->avg.load_avg;
4330 }
4331
4332 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
4333
4334 static inline unsigned long task_util(struct task_struct *p)
4335 {
4336 return READ_ONCE(p->se.avg.util_avg);
4337 }
4338
4339 static inline unsigned long _task_util_est(struct task_struct *p)
4340 {
4341 struct util_est ue = READ_ONCE(p->se.avg.util_est);
4342
4343 return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
4344 }
4345
4346 static inline unsigned long task_util_est(struct task_struct *p)
4347 {
4348 return max(task_util(p), _task_util_est(p));
4349 }
4350
4351 #ifdef CONFIG_UCLAMP_TASK
4352 static inline unsigned long uclamp_task_util(struct task_struct *p,
4353 unsigned long uclamp_min,
4354 unsigned long uclamp_max)
4355 {
4356 return clamp(task_util_est(p), uclamp_min, uclamp_max);
4357 }
4358 #else
4359 static inline unsigned long uclamp_task_util(struct task_struct *p,
4360 unsigned long uclamp_min,
4361 unsigned long uclamp_max)
4362 {
4363 return task_util_est(p);
4364 }
4365 #endif
4366
4367 static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
4368 struct task_struct *p)
4369 {
4370 unsigned int enqueued;
4371
4372 if (!sched_feat(UTIL_EST))
4373 return;
4374
4375 /* Update root cfs_rq's estimated utilization */
4376 enqueued = cfs_rq->avg.util_est.enqueued;
4377 enqueued += _task_util_est(p);
4378 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4379
4380 trace_sched_util_est_cfs_tp(cfs_rq);
4381 }
4382
4383 static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
4384 struct task_struct *p)
4385 {
4386 unsigned int enqueued;
4387
4388 if (!sched_feat(UTIL_EST))
4389 return;
4390
4391 /* Update root cfs_rq's estimated utilization */
4392 enqueued = cfs_rq->avg.util_est.enqueued;
4393 enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
4394 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4395
4396 trace_sched_util_est_cfs_tp(cfs_rq);
4397 }
4398
4399 #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
4400
4401 /*
4402 * Check if a (signed) value is within a specified (unsigned) margin,
4403 * based on the observation that:
4404 *
4405 * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
4406 *
4407 * NOTE: this only works when value + margin < INT_MAX.
4408 */
4409 static inline bool within_margin(int value, int margin)
4410 {
4411 return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
4412 }
4413
4414 static inline void util_est_update(struct cfs_rq *cfs_rq,
4415 struct task_struct *p,
4416 bool task_sleep)
4417 {
4418 long last_ewma_diff, last_enqueued_diff;
4419 struct util_est ue;
4420
4421 if (!sched_feat(UTIL_EST))
4422 return;
4423
4424 /*
4425 * Skip update of task's estimated utilization when the task has not
4426 * yet completed an activation, e.g. being migrated.
4427 */
4428 if (!task_sleep)
4429 return;
4430
4431 /*
4432 * If the PELT values haven't changed since enqueue time,
4433 * skip the util_est update.
4434 */
4435 ue = p->se.avg.util_est;
4436 if (ue.enqueued & UTIL_AVG_UNCHANGED)
4437 return;
4438
4439 last_enqueued_diff = ue.enqueued;
4440
4441 /*
4442 * Reset EWMA on utilization increases, the moving average is used only
4443 * to smooth utilization decreases.
4444 */
4445 ue.enqueued = task_util(p);
4446 if (sched_feat(UTIL_EST_FASTUP)) {
4447 if (ue.ewma < ue.enqueued) {
4448 ue.ewma = ue.enqueued;
4449 goto done;
4450 }
4451 }
4452
4453 /*
4454 * Skip update of task's estimated utilization when its members are
4455 * already ~1% close to its last activation value.
4456 */
4457 last_ewma_diff = ue.enqueued - ue.ewma;
4458 last_enqueued_diff -= ue.enqueued;
4459 if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
4460 if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
4461 goto done;
4462
4463 return;
4464 }
4465
4466 /*
4467 * To avoid overestimation of actual task utilization, skip updates if
4468 * we cannot grant there is idle time in this CPU.
4469 */
4470 if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
4471 return;
4472
4473 /*
4474 * Update Task's estimated utilization
4475 *
4476 * When *p completes an activation we can consolidate another sample
4477 * of the task size. This is done by storing the current PELT value
4478 * as ue.enqueued and by using this value to update the Exponential
4479 * Weighted Moving Average (EWMA):
4480 *
4481 * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1)
4482 * = w * task_util(p) + ewma(t-1) - w * ewma(t-1)
4483 * = w * (task_util(p) - ewma(t-1)) + ewma(t-1)
4484 * = w * ( last_ewma_diff ) + ewma(t-1)
4485 * = w * (last_ewma_diff + ewma(t-1) / w)
4486 *
4487 * Where 'w' is the weight of new samples, which is configured to be
4488 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
4489 */
4490 ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
4491 ue.ewma += last_ewma_diff;
4492 ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
4493 done:
4494 ue.enqueued |= UTIL_AVG_UNCHANGED;
4495 WRITE_ONCE(p->se.avg.util_est, ue);
4496
4497 trace_sched_util_est_se_tp(&p->se);
4498 }
4499
4500 static inline int util_fits_cpu(unsigned long util,
4501 unsigned long uclamp_min,
4502 unsigned long uclamp_max,
4503 int cpu)
4504 {
4505 unsigned long capacity_orig, capacity_orig_thermal;
4506 unsigned long capacity = capacity_of(cpu);
4507 bool fits, uclamp_max_fits;
4508
4509 /*
4510 * Check if the real util fits without any uclamp boost/cap applied.
4511 */
4512 fits = fits_capacity(util, capacity);
4513
4514 if (!uclamp_is_used())
4515 return fits;
4516
4517 /*
4518 * We must use capacity_orig_of() for comparing against uclamp_min and
4519 * uclamp_max. We only care about capacity pressure (by using
4520 * capacity_of()) for comparing against the real util.
4521 *
4522 * If a task is boosted to 1024 for example, we don't want a tiny
4523 * pressure to skew the check whether it fits a CPU or not.
4524 *
4525 * Similarly if a task is capped to capacity_orig_of(little_cpu), it
4526 * should fit a little cpu even if there's some pressure.
4527 *
4528 * Only exception is for thermal pressure since it has a direct impact
4529 * on available OPP of the system.
4530 *
4531 * We honour it for uclamp_min only as a drop in performance level
4532 * could result in not getting the requested minimum performance level.
4533 *
4534 * For uclamp_max, we can tolerate a drop in performance level as the
4535 * goal is to cap the task. So it's okay if it's getting less.
4536 */
4537 capacity_orig = capacity_orig_of(cpu);
4538 capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
4539
4540 /*
4541 * We want to force a task to fit a cpu as implied by uclamp_max.
4542 * But we do have some corner cases to cater for..
4543 *
4544 *
4545 * C=z
4546 * | ___
4547 * | C=y | |
4548 * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
4549 * | C=x | | | |
4550 * | ___ | | | |
4551 * | | | | | | | (util somewhere in this region)
4552 * | | | | | | |
4553 * | | | | | | |
4554 * +----------------------------------------
4555 * cpu0 cpu1 cpu2
4556 *
4557 * In the above example if a task is capped to a specific performance
4558 * point, y, then when:
4559 *
4560 * * util = 80% of x then it does not fit on cpu0 and should migrate
4561 * to cpu1
4562 * * util = 80% of y then it is forced to fit on cpu1 to honour
4563 * uclamp_max request.
4564 *
4565 * which is what we're enforcing here. A task always fits if
4566 * uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig,
4567 * the normal upmigration rules should withhold still.
4568 *
4569 * Only exception is when we are on max capacity, then we need to be
4570 * careful not to block overutilized state. This is so because:
4571 *
4572 * 1. There's no concept of capping at max_capacity! We can't go
4573 * beyond this performance level anyway.
4574 * 2. The system is being saturated when we're operating near
4575 * max capacity, it doesn't make sense to block overutilized.
4576 */
4577 uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE);
4578 uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig);
4579 fits = fits || uclamp_max_fits;
4580
4581 /*
4582 *
4583 * C=z
4584 * | ___ (region a, capped, util >= uclamp_max)
4585 * | C=y | |
4586 * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
4587 * | C=x | | | |
4588 * | ___ | | | | (region b, uclamp_min <= util <= uclamp_max)
4589 * |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min
4590 * | | | | | | |
4591 * | | | | | | | (region c, boosted, util < uclamp_min)
4592 * +----------------------------------------
4593 * cpu0 cpu1 cpu2
4594 *
4595 * a) If util > uclamp_max, then we're capped, we don't care about
4596 * actual fitness value here. We only care if uclamp_max fits
4597 * capacity without taking margin/pressure into account.
4598 * See comment above.
4599 *
4600 * b) If uclamp_min <= util <= uclamp_max, then the normal
4601 * fits_capacity() rules apply. Except we need to ensure that we
4602 * enforce we remain within uclamp_max, see comment above.
4603 *
4604 * c) If util < uclamp_min, then we are boosted. Same as (b) but we
4605 * need to take into account the boosted value fits the CPU without
4606 * taking margin/pressure into account.
4607 *
4608 * Cases (a) and (b) are handled in the 'fits' variable already. We
4609 * just need to consider an extra check for case (c) after ensuring we
4610 * handle the case uclamp_min > uclamp_max.
4611 */
4612 uclamp_min = min(uclamp_min, uclamp_max);
4613 if (fits && (util < uclamp_min) && (uclamp_min > capacity_orig_thermal))
4614 return -1;
4615
4616 return fits;
4617 }
4618
4619 static inline int task_fits_cpu(struct task_struct *p, int cpu)
4620 {
4621 unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
4622 unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
4623 unsigned long util = task_util_est(p);
4624 /*
4625 * Return true only if the cpu fully fits the task requirements, which
4626 * include the utilization but also the performance hints.
4627 */
4628 return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
4629 }
4630
4631 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
4632 {
4633 if (!sched_asym_cpucap_active())
4634 return;
4635
4636 if (!p || p->nr_cpus_allowed == 1) {
4637 rq->misfit_task_load = 0;
4638 return;
4639 }
4640
4641 if (task_fits_cpu(p, cpu_of(rq))) {
4642 rq->misfit_task_load = 0;
4643 return;
4644 }
4645
4646 /*
4647 * Make sure that misfit_task_load will not be null even if
4648 * task_h_load() returns 0.
4649 */
4650 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
4651 }
4652
4653 #else /* CONFIG_SMP */
4654
4655 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
4656 {
4657 return true;
4658 }
4659
4660 #define UPDATE_TG 0x0
4661 #define SKIP_AGE_LOAD 0x0
4662 #define DO_ATTACH 0x0
4663 #define DO_DETACH 0x0
4664
4665 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
4666 {
4667 cfs_rq_util_change(cfs_rq, 0);
4668 }
4669
4670 static inline void remove_entity_load_avg(struct sched_entity *se) {}
4671
4672 static inline void
4673 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
4674 static inline void
4675 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
4676
4677 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
4678 {
4679 return 0;
4680 }
4681
4682 static inline void
4683 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
4684
4685 static inline void
4686 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
4687
4688 static inline void
4689 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
4690 bool task_sleep) {}
4691 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
4692
4693 #endif /* CONFIG_SMP */
4694
4695 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
4696 {
4697 #ifdef CONFIG_SCHED_DEBUG
4698 s64 d = se->vruntime - cfs_rq->min_vruntime;
4699
4700 if (d < 0)
4701 d = -d;
4702
4703 if (d > 3*sysctl_sched_latency)
4704 schedstat_inc(cfs_rq->nr_spread_over);
4705 #endif
4706 }
4707
4708 static inline bool entity_is_long_sleeper(struct sched_entity *se)
4709 {
4710 struct cfs_rq *cfs_rq;
4711 u64 sleep_time;
4712
4713 if (se->exec_start == 0)
4714 return false;
4715
4716 cfs_rq = cfs_rq_of(se);
4717
4718 sleep_time = rq_clock_task(rq_of(cfs_rq));
4719
4720 /* Happen while migrating because of clock task divergence */
4721 if (sleep_time <= se->exec_start)
4722 return false;
4723
4724 sleep_time -= se->exec_start;
4725 if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
4726 return true;
4727
4728 return false;
4729 }
4730
4731 static void
4732 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
4733 {
4734 u64 vruntime = cfs_rq->min_vruntime;
4735
4736 /*
4737 * The 'current' period is already promised to the current tasks,
4738 * however the extra weight of the new task will slow them down a
4739 * little, place the new task so that it fits in the slot that
4740 * stays open at the end.
4741 */
4742 if (initial && sched_feat(START_DEBIT))
4743 vruntime += sched_vslice(cfs_rq, se);
4744
4745 /* sleeps up to a single latency don't count. */
4746 if (!initial) {
4747 unsigned long thresh;
4748
4749 if (se_is_idle(se))
4750 thresh = sysctl_sched_min_granularity;
4751 else
4752 thresh = sysctl_sched_latency;
4753
4754 /*
4755 * Halve their sleep time's effect, to allow
4756 * for a gentler effect of sleepers:
4757 */
4758 if (sched_feat(GENTLE_FAIR_SLEEPERS))
4759 thresh >>= 1;
4760
4761 vruntime -= thresh;
4762 }
4763
4764 /*
4765 * Pull vruntime of the entity being placed to the base level of
4766 * cfs_rq, to prevent boosting it if placed backwards.
4767 * However, min_vruntime can advance much faster than real time, with
4768 * the extreme being when an entity with the minimal weight always runs
4769 * on the cfs_rq. If the waking entity slept for a long time, its
4770 * vruntime difference from min_vruntime may overflow s64 and their
4771 * comparison may get inversed, so ignore the entity's original
4772 * vruntime in that case.
4773 * The maximal vruntime speedup is given by the ratio of normal to
4774 * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
4775 * When placing a migrated waking entity, its exec_start has been set
4776 * from a different rq. In order to take into account a possible
4777 * divergence between new and prev rq's clocks task because of irq and
4778 * stolen time, we take an additional margin.
4779 * So, cutting off on the sleep time of
4780 * 2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
4781 * should be safe.
4782 */
4783 if (entity_is_long_sleeper(se))
4784 se->vruntime = vruntime;
4785 else
4786 se->vruntime = max_vruntime(se->vruntime, vruntime);
4787 }
4788
4789 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
4790
4791 static inline bool cfs_bandwidth_used(void);
4792
4793 /*
4794 * MIGRATION
4795 *
4796 * dequeue
4797 * update_curr()
4798 * update_min_vruntime()
4799 * vruntime -= min_vruntime
4800 *
4801 * enqueue
4802 * update_curr()
4803 * update_min_vruntime()
4804 * vruntime += min_vruntime
4805 *
4806 * this way the vruntime transition between RQs is done when both
4807 * min_vruntime are up-to-date.
4808 *
4809 * WAKEUP (remote)
4810 *
4811 * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
4812 * vruntime -= min_vruntime
4813 *
4814 * enqueue
4815 * update_curr()
4816 * update_min_vruntime()
4817 * vruntime += min_vruntime
4818 *
4819 * this way we don't have the most up-to-date min_vruntime on the originating
4820 * CPU and an up-to-date min_vruntime on the destination CPU.
4821 */
4822
4823 static void
4824 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4825 {
4826 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
4827 bool curr = cfs_rq->curr == se;
4828
4829 /*
4830 * If we're the current task, we must renormalise before calling
4831 * update_curr().
4832 */
4833 if (renorm && curr)
4834 se->vruntime += cfs_rq->min_vruntime;
4835
4836 update_curr(cfs_rq);
4837
4838 /*
4839 * Otherwise, renormalise after, such that we're placed at the current
4840 * moment in time, instead of some random moment in the past. Being
4841 * placed in the past could significantly boost this task to the
4842 * fairness detriment of existing tasks.
4843 */
4844 if (renorm && !curr)
4845 se->vruntime += cfs_rq->min_vruntime;
4846
4847 /*
4848 * When enqueuing a sched_entity, we must:
4849 * - Update loads to have both entity and cfs_rq synced with now.
4850 * - For group_entity, update its runnable_weight to reflect the new
4851 * h_nr_running of its group cfs_rq.
4852 * - For group_entity, update its weight to reflect the new share of
4853 * its group cfs_rq
4854 * - Add its new weight to cfs_rq->load.weight
4855 */
4856 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
4857 se_update_runnable(se);
4858 update_cfs_group(se);
4859 account_entity_enqueue(cfs_rq, se);
4860
4861 if (flags & ENQUEUE_WAKEUP)
4862 place_entity(cfs_rq, se, 0);
4863 /* Entity has migrated, no longer consider this task hot */
4864 if (flags & ENQUEUE_MIGRATED)
4865 se->exec_start = 0;
4866
4867 check_schedstat_required();
4868 update_stats_enqueue_fair(cfs_rq, se, flags);
4869 check_spread(cfs_rq, se);
4870 if (!curr)
4871 __enqueue_entity(cfs_rq, se);
4872 se->on_rq = 1;
4873
4874 if (cfs_rq->nr_running == 1) {
4875 check_enqueue_throttle(cfs_rq);
4876 if (!throttled_hierarchy(cfs_rq))
4877 list_add_leaf_cfs_rq(cfs_rq);
4878 }
4879 }
4880
4881 static void __clear_buddies_last(struct sched_entity *se)
4882 {
4883 for_each_sched_entity(se) {
4884 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4885 if (cfs_rq->last != se)
4886 break;
4887
4888 cfs_rq->last = NULL;
4889 }
4890 }
4891
4892 static void __clear_buddies_next(struct sched_entity *se)
4893 {
4894 for_each_sched_entity(se) {
4895 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4896 if (cfs_rq->next != se)
4897 break;
4898
4899 cfs_rq->next = NULL;
4900 }
4901 }
4902
4903 static void __clear_buddies_skip(struct sched_entity *se)
4904 {
4905 for_each_sched_entity(se) {
4906 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4907 if (cfs_rq->skip != se)
4908 break;
4909
4910 cfs_rq->skip = NULL;
4911 }
4912 }
4913
4914 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
4915 {
4916 if (cfs_rq->last == se)
4917 __clear_buddies_last(se);
4918
4919 if (cfs_rq->next == se)
4920 __clear_buddies_next(se);
4921
4922 if (cfs_rq->skip == se)
4923 __clear_buddies_skip(se);
4924 }
4925
4926 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4927
4928 static void
4929 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4930 {
4931 int action = UPDATE_TG;
4932
4933 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
4934 action |= DO_DETACH;
4935
4936 /*
4937 * Update run-time statistics of the 'current'.
4938 */
4939 update_curr(cfs_rq);
4940
4941 /*
4942 * When dequeuing a sched_entity, we must:
4943 * - Update loads to have both entity and cfs_rq synced with now.
4944 * - For group_entity, update its runnable_weight to reflect the new
4945 * h_nr_running of its group cfs_rq.
4946 * - Subtract its previous weight from cfs_rq->load.weight.
4947 * - For group entity, update its weight to reflect the new share
4948 * of its group cfs_rq.
4949 */
4950 update_load_avg(cfs_rq, se, action);
4951 se_update_runnable(se);
4952
4953 update_stats_dequeue_fair(cfs_rq, se, flags);
4954
4955 clear_buddies(cfs_rq, se);
4956
4957 if (se != cfs_rq->curr)
4958 __dequeue_entity(cfs_rq, se);
4959 se->on_rq = 0;
4960 account_entity_dequeue(cfs_rq, se);
4961
4962 /*
4963 * Normalize after update_curr(); which will also have moved
4964 * min_vruntime if @se is the one holding it back. But before doing
4965 * update_min_vruntime() again, which will discount @se's position and
4966 * can move min_vruntime forward still more.
4967 */
4968 if (!(flags & DEQUEUE_SLEEP))
4969 se->vruntime -= cfs_rq->min_vruntime;
4970
4971 /* return excess runtime on last dequeue */
4972 return_cfs_rq_runtime(cfs_rq);
4973
4974 update_cfs_group(se);
4975
4976 /*
4977 * Now advance min_vruntime if @se was the entity holding it back,
4978 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
4979 * put back on, and if we advance min_vruntime, we'll be placed back
4980 * further than we started -- ie. we'll be penalized.
4981 */
4982 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
4983 update_min_vruntime(cfs_rq);
4984
4985 if (cfs_rq->nr_running == 0)
4986 update_idle_cfs_rq_clock_pelt(cfs_rq);
4987 }
4988
4989 /*
4990 * Preempt the current task with a newly woken task if needed:
4991 */
4992 static void
4993 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
4994 {
4995 unsigned long ideal_runtime, delta_exec;
4996 struct sched_entity *se;
4997 s64 delta;
4998
4999 /*
5000 * When many tasks blow up the sched_period; it is possible that
5001 * sched_slice() reports unusually large results (when many tasks are
5002 * very light for example). Therefore impose a maximum.
5003 */
5004 ideal_runtime = min_t(u64, sched_slice(cfs_rq, curr), sysctl_sched_latency);
5005
5006 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
5007 if (delta_exec > ideal_runtime) {
5008 resched_curr(rq_of(cfs_rq));
5009 /*
5010 * The current task ran long enough, ensure it doesn't get
5011 * re-elected due to buddy favours.
5012 */
5013 clear_buddies(cfs_rq, curr);
5014 return;
5015 }
5016
5017 /*
5018 * Ensure that a task that missed wakeup preemption by a
5019 * narrow margin doesn't have to wait for a full slice.
5020 * This also mitigates buddy induced latencies under load.
5021 */
5022 if (delta_exec < sysctl_sched_min_granularity)
5023 return;
5024
5025 se = __pick_first_entity(cfs_rq);
5026 delta = curr->vruntime - se->vruntime;
5027
5028 if (delta < 0)
5029 return;
5030
5031 if (delta > ideal_runtime)
5032 resched_curr(rq_of(cfs_rq));
5033 }
5034
5035 static void
5036 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
5037 {
5038 clear_buddies(cfs_rq, se);
5039
5040 /* 'current' is not kept within the tree. */
5041 if (se->on_rq) {
5042 /*
5043 * Any task has to be enqueued before it get to execute on
5044 * a CPU. So account for the time it spent waiting on the
5045 * runqueue.
5046 */
5047 update_stats_wait_end_fair(cfs_rq, se);
5048 __dequeue_entity(cfs_rq, se);
5049 update_load_avg(cfs_rq, se, UPDATE_TG);
5050 }
5051
5052 update_stats_curr_start(cfs_rq, se);
5053 cfs_rq->curr = se;
5054
5055 /*
5056 * Track our maximum slice length, if the CPU's load is at
5057 * least twice that of our own weight (i.e. dont track it
5058 * when there are only lesser-weight tasks around):
5059 */
5060 if (schedstat_enabled() &&
5061 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
5062 struct sched_statistics *stats;
5063
5064 stats = __schedstats_from_se(se);
5065 __schedstat_set(stats->slice_max,
5066 max((u64)stats->slice_max,
5067 se->sum_exec_runtime - se->prev_sum_exec_runtime));
5068 }
5069
5070 se->prev_sum_exec_runtime = se->sum_exec_runtime;
5071 }
5072
5073 static int
5074 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
5075
5076 /*
5077 * Pick the next process, keeping these things in mind, in this order:
5078 * 1) keep things fair between processes/task groups
5079 * 2) pick the "next" process, since someone really wants that to run
5080 * 3) pick the "last" process, for cache locality
5081 * 4) do not run the "skip" process, if something else is available
5082 */
5083 static struct sched_entity *
5084 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
5085 {
5086 struct sched_entity *left = __pick_first_entity(cfs_rq);
5087 struct sched_entity *se;
5088
5089 /*
5090 * If curr is set we have to see if its left of the leftmost entity
5091 * still in the tree, provided there was anything in the tree at all.
5092 */
5093 if (!left || (curr && entity_before(curr, left)))
5094 left = curr;
5095
5096 se = left; /* ideally we run the leftmost entity */
5097
5098 /*
5099 * Avoid running the skip buddy, if running something else can
5100 * be done without getting too unfair.
5101 */
5102 if (cfs_rq->skip && cfs_rq->skip == se) {
5103 struct sched_entity *second;
5104
5105 if (se == curr) {
5106 second = __pick_first_entity(cfs_rq);
5107 } else {
5108 second = __pick_next_entity(se);
5109 if (!second || (curr && entity_before(curr, second)))
5110 second = curr;
5111 }
5112
5113 if (second && wakeup_preempt_entity(second, left) < 1)
5114 se = second;
5115 }
5116
5117 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) {
5118 /*
5119 * Someone really wants this to run. If it's not unfair, run it.
5120 */
5121 se = cfs_rq->next;
5122 } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) {
5123 /*
5124 * Prefer last buddy, try to return the CPU to a preempted task.
5125 */
5126 se = cfs_rq->last;
5127 }
5128
5129 return se;
5130 }
5131
5132 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5133
5134 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
5135 {
5136 /*
5137 * If still on the runqueue then deactivate_task()
5138 * was not called and update_curr() has to be done:
5139 */
5140 if (prev->on_rq)
5141 update_curr(cfs_rq);
5142
5143 /* throttle cfs_rqs exceeding runtime */
5144 check_cfs_rq_runtime(cfs_rq);
5145
5146 check_spread(cfs_rq, prev);
5147
5148 if (prev->on_rq) {
5149 update_stats_wait_start_fair(cfs_rq, prev);
5150 /* Put 'current' back into the tree. */
5151 __enqueue_entity(cfs_rq, prev);
5152 /* in !on_rq case, update occurred at dequeue */
5153 update_load_avg(cfs_rq, prev, 0);
5154 }
5155 cfs_rq->curr = NULL;
5156 }
5157
5158 static void
5159 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
5160 {
5161 /*
5162 * Update run-time statistics of the 'current'.
5163 */
5164 update_curr(cfs_rq);
5165
5166 /*
5167 * Ensure that runnable average is periodically updated.
5168 */
5169 update_load_avg(cfs_rq, curr, UPDATE_TG);
5170 update_cfs_group(curr);
5171
5172 #ifdef CONFIG_SCHED_HRTICK
5173 /*
5174 * queued ticks are scheduled to match the slice, so don't bother
5175 * validating it and just reschedule.
5176 */
5177 if (queued) {
5178 resched_curr(rq_of(cfs_rq));
5179 return;
5180 }
5181 /*
5182 * don't let the period tick interfere with the hrtick preemption
5183 */
5184 if (!sched_feat(DOUBLE_TICK) &&
5185 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
5186 return;
5187 #endif
5188
5189 if (cfs_rq->nr_running > 1)
5190 check_preempt_tick(cfs_rq, curr);
5191 }
5192
5193
5194 /**************************************************
5195 * CFS bandwidth control machinery
5196 */
5197
5198 #ifdef CONFIG_CFS_BANDWIDTH
5199
5200 #ifdef CONFIG_JUMP_LABEL
5201 static struct static_key __cfs_bandwidth_used;
5202
5203 static inline bool cfs_bandwidth_used(void)
5204 {
5205 return static_key_false(&__cfs_bandwidth_used);
5206 }
5207
5208 void cfs_bandwidth_usage_inc(void)
5209 {
5210 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
5211 }
5212
5213 void cfs_bandwidth_usage_dec(void)
5214 {
5215 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
5216 }
5217 #else /* CONFIG_JUMP_LABEL */
5218 static bool cfs_bandwidth_used(void)
5219 {
5220 return true;
5221 }
5222
5223 void cfs_bandwidth_usage_inc(void) {}
5224 void cfs_bandwidth_usage_dec(void) {}
5225 #endif /* CONFIG_JUMP_LABEL */
5226
5227 /*
5228 * default period for cfs group bandwidth.
5229 * default: 0.1s, units: nanoseconds
5230 */
5231 static inline u64 default_cfs_period(void)
5232 {
5233 return 100000000ULL;
5234 }
5235
5236 static inline u64 sched_cfs_bandwidth_slice(void)
5237 {
5238 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
5239 }
5240
5241 /*
5242 * Replenish runtime according to assigned quota. We use sched_clock_cpu
5243 * directly instead of rq->clock to avoid adding additional synchronization
5244 * around rq->lock.
5245 *
5246 * requires cfs_b->lock
5247 */
5248 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
5249 {
5250 s64 runtime;
5251
5252 if (unlikely(cfs_b->quota == RUNTIME_INF))
5253 return;
5254
5255 cfs_b->runtime += cfs_b->quota;
5256 runtime = cfs_b->runtime_snap - cfs_b->runtime;
5257 if (runtime > 0) {
5258 cfs_b->burst_time += runtime;
5259 cfs_b->nr_burst++;
5260 }
5261
5262 cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
5263 cfs_b->runtime_snap = cfs_b->runtime;
5264 }
5265
5266 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
5267 {
5268 return &tg->cfs_bandwidth;
5269 }
5270
5271 /* returns 0 on failure to allocate runtime */
5272 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
5273 struct cfs_rq *cfs_rq, u64 target_runtime)
5274 {
5275 u64 min_amount, amount = 0;
5276
5277 lockdep_assert_held(&cfs_b->lock);
5278
5279 /* note: this is a positive sum as runtime_remaining <= 0 */
5280 min_amount = target_runtime - cfs_rq->runtime_remaining;
5281
5282 if (cfs_b->quota == RUNTIME_INF)
5283 amount = min_amount;
5284 else {
5285 start_cfs_bandwidth(cfs_b);
5286
5287 if (cfs_b->runtime > 0) {
5288 amount = min(cfs_b->runtime, min_amount);
5289 cfs_b->runtime -= amount;
5290 cfs_b->idle = 0;
5291 }
5292 }
5293
5294 cfs_rq->runtime_remaining += amount;
5295
5296 return cfs_rq->runtime_remaining > 0;
5297 }
5298
5299 /* returns 0 on failure to allocate runtime */
5300 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5301 {
5302 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5303 int ret;
5304
5305 raw_spin_lock(&cfs_b->lock);
5306 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
5307 raw_spin_unlock(&cfs_b->lock);
5308
5309 return ret;
5310 }
5311
5312 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
5313 {
5314 /* dock delta_exec before expiring quota (as it could span periods) */
5315 cfs_rq->runtime_remaining -= delta_exec;
5316
5317 if (likely(cfs_rq->runtime_remaining > 0))
5318 return;
5319
5320 if (cfs_rq->throttled)
5321 return;
5322 /*
5323 * if we're unable to extend our runtime we resched so that the active
5324 * hierarchy can be throttled
5325 */
5326 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
5327 resched_curr(rq_of(cfs_rq));
5328 }
5329
5330 static __always_inline
5331 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
5332 {
5333 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
5334 return;
5335
5336 __account_cfs_rq_runtime(cfs_rq, delta_exec);
5337 }
5338
5339 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
5340 {
5341 return cfs_bandwidth_used() && cfs_rq->throttled;
5342 }
5343
5344 /* check whether cfs_rq, or any parent, is throttled */
5345 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
5346 {
5347 return cfs_bandwidth_used() && cfs_rq->throttle_count;
5348 }
5349
5350 /*
5351 * Ensure that neither of the group entities corresponding to src_cpu or
5352 * dest_cpu are members of a throttled hierarchy when performing group
5353 * load-balance operations.
5354 */
5355 static inline int throttled_lb_pair(struct task_group *tg,
5356 int src_cpu, int dest_cpu)
5357 {
5358 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
5359
5360 src_cfs_rq = tg->cfs_rq[src_cpu];
5361 dest_cfs_rq = tg->cfs_rq[dest_cpu];
5362
5363 return throttled_hierarchy(src_cfs_rq) ||
5364 throttled_hierarchy(dest_cfs_rq);
5365 }
5366
5367 static int tg_unthrottle_up(struct task_group *tg, void *data)
5368 {
5369 struct rq *rq = data;
5370 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5371
5372 cfs_rq->throttle_count--;
5373 if (!cfs_rq->throttle_count) {
5374 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
5375 cfs_rq->throttled_clock_pelt;
5376
5377 /* Add cfs_rq with load or one or more already running entities to the list */
5378 if (!cfs_rq_is_decayed(cfs_rq))
5379 list_add_leaf_cfs_rq(cfs_rq);
5380 }
5381
5382 return 0;
5383 }
5384
5385 static int tg_throttle_down(struct task_group *tg, void *data)
5386 {
5387 struct rq *rq = data;
5388 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5389
5390 /* group is entering throttled state, stop time */
5391 if (!cfs_rq->throttle_count) {
5392 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
5393 list_del_leaf_cfs_rq(cfs_rq);
5394 }
5395 cfs_rq->throttle_count++;
5396
5397 return 0;
5398 }
5399
5400 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
5401 {
5402 struct rq *rq = rq_of(cfs_rq);
5403 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5404 struct sched_entity *se;
5405 long task_delta, idle_task_delta, dequeue = 1;
5406
5407 raw_spin_lock(&cfs_b->lock);
5408 /* This will start the period timer if necessary */
5409 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
5410 /*
5411 * We have raced with bandwidth becoming available, and if we
5412 * actually throttled the timer might not unthrottle us for an
5413 * entire period. We additionally needed to make sure that any
5414 * subsequent check_cfs_rq_runtime calls agree not to throttle
5415 * us, as we may commit to do cfs put_prev+pick_next, so we ask
5416 * for 1ns of runtime rather than just check cfs_b.
5417 */
5418 dequeue = 0;
5419 } else {
5420 list_add_tail_rcu(&cfs_rq->throttled_list,
5421 &cfs_b->throttled_cfs_rq);
5422 }
5423 raw_spin_unlock(&cfs_b->lock);
5424
5425 if (!dequeue)
5426 return false; /* Throttle no longer required. */
5427
5428 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5429
5430 /* freeze hierarchy runnable averages while throttled */
5431 rcu_read_lock();
5432 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
5433 rcu_read_unlock();
5434
5435 task_delta = cfs_rq->h_nr_running;
5436 idle_task_delta = cfs_rq->idle_h_nr_running;
5437 for_each_sched_entity(se) {
5438 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5439 /* throttled entity or throttle-on-deactivate */
5440 if (!se->on_rq)
5441 goto done;
5442
5443 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
5444
5445 if (cfs_rq_is_idle(group_cfs_rq(se)))
5446 idle_task_delta = cfs_rq->h_nr_running;
5447
5448 qcfs_rq->h_nr_running -= task_delta;
5449 qcfs_rq->idle_h_nr_running -= idle_task_delta;
5450
5451 if (qcfs_rq->load.weight) {
5452 /* Avoid re-evaluating load for this entity: */
5453 se = parent_entity(se);
5454 break;
5455 }
5456 }
5457
5458 for_each_sched_entity(se) {
5459 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5460 /* throttled entity or throttle-on-deactivate */
5461 if (!se->on_rq)
5462 goto done;
5463
5464 update_load_avg(qcfs_rq, se, 0);
5465 se_update_runnable(se);
5466
5467 if (cfs_rq_is_idle(group_cfs_rq(se)))
5468 idle_task_delta = cfs_rq->h_nr_running;
5469
5470 qcfs_rq->h_nr_running -= task_delta;
5471 qcfs_rq->idle_h_nr_running -= idle_task_delta;
5472 }
5473
5474 /* At this point se is NULL and we are at root level*/
5475 sub_nr_running(rq, task_delta);
5476
5477 done:
5478 /*
5479 * Note: distribution will already see us throttled via the
5480 * throttled-list. rq->lock protects completion.
5481 */
5482 cfs_rq->throttled = 1;
5483 cfs_rq->throttled_clock = rq_clock(rq);
5484 return true;
5485 }
5486
5487 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
5488 {
5489 struct rq *rq = rq_of(cfs_rq);
5490 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5491 struct sched_entity *se;
5492 long task_delta, idle_task_delta;
5493
5494 se = cfs_rq->tg->se[cpu_of(rq)];
5495
5496 cfs_rq->throttled = 0;
5497
5498 update_rq_clock(rq);
5499
5500 raw_spin_lock(&cfs_b->lock);
5501 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5502 list_del_rcu(&cfs_rq->throttled_list);
5503 raw_spin_unlock(&cfs_b->lock);
5504
5505 /* update hierarchical throttle state */
5506 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
5507
5508 if (!cfs_rq->load.weight) {
5509 if (!cfs_rq->on_list)
5510 return;
5511 /*
5512 * Nothing to run but something to decay (on_list)?
5513 * Complete the branch.
5514 */
5515 for_each_sched_entity(se) {
5516 if (list_add_leaf_cfs_rq(cfs_rq_of(se)))
5517 break;
5518 }
5519 goto unthrottle_throttle;
5520 }
5521
5522 task_delta = cfs_rq->h_nr_running;
5523 idle_task_delta = cfs_rq->idle_h_nr_running;
5524 for_each_sched_entity(se) {
5525 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5526
5527 if (se->on_rq)
5528 break;
5529 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
5530
5531 if (cfs_rq_is_idle(group_cfs_rq(se)))
5532 idle_task_delta = cfs_rq->h_nr_running;
5533
5534 qcfs_rq->h_nr_running += task_delta;
5535 qcfs_rq->idle_h_nr_running += idle_task_delta;
5536
5537 /* end evaluation on encountering a throttled cfs_rq */
5538 if (cfs_rq_throttled(qcfs_rq))
5539 goto unthrottle_throttle;
5540 }
5541
5542 for_each_sched_entity(se) {
5543 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5544
5545 update_load_avg(qcfs_rq, se, UPDATE_TG);
5546 se_update_runnable(se);
5547
5548 if (cfs_rq_is_idle(group_cfs_rq(se)))
5549 idle_task_delta = cfs_rq->h_nr_running;
5550
5551 qcfs_rq->h_nr_running += task_delta;
5552 qcfs_rq->idle_h_nr_running += idle_task_delta;
5553
5554 /* end evaluation on encountering a throttled cfs_rq */
5555 if (cfs_rq_throttled(qcfs_rq))
5556 goto unthrottle_throttle;
5557 }
5558
5559 /* At this point se is NULL and we are at root level*/
5560 add_nr_running(rq, task_delta);
5561
5562 unthrottle_throttle:
5563 assert_list_leaf_cfs_rq(rq);
5564
5565 /* Determine whether we need to wake up potentially idle CPU: */
5566 if (rq->curr == rq->idle && rq->cfs.nr_running)
5567 resched_curr(rq);
5568 }
5569
5570 #ifdef CONFIG_SMP
5571 static void __cfsb_csd_unthrottle(void *arg)
5572 {
5573 struct cfs_rq *cursor, *tmp;
5574 struct rq *rq = arg;
5575 struct rq_flags rf;
5576
5577 rq_lock(rq, &rf);
5578
5579 /*
5580 * Since we hold rq lock we're safe from concurrent manipulation of
5581 * the CSD list. However, this RCU critical section annotates the
5582 * fact that we pair with sched_free_group_rcu(), so that we cannot
5583 * race with group being freed in the window between removing it
5584 * from the list and advancing to the next entry in the list.
5585 */
5586 rcu_read_lock();
5587
5588 list_for_each_entry_safe(cursor, tmp, &rq->cfsb_csd_list,
5589 throttled_csd_list) {
5590 list_del_init(&cursor->throttled_csd_list);
5591
5592 if (cfs_rq_throttled(cursor))
5593 unthrottle_cfs_rq(cursor);
5594 }
5595
5596 rcu_read_unlock();
5597
5598 rq_unlock(rq, &rf);
5599 }
5600
5601 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
5602 {
5603 struct rq *rq = rq_of(cfs_rq);
5604 bool first;
5605
5606 if (rq == this_rq()) {
5607 unthrottle_cfs_rq(cfs_rq);
5608 return;
5609 }
5610
5611 /* Already enqueued */
5612 if (SCHED_WARN_ON(!list_empty(&cfs_rq->throttled_csd_list)))
5613 return;
5614
5615 first = list_empty(&rq->cfsb_csd_list);
5616 list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
5617 if (first)
5618 smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
5619 }
5620 #else
5621 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
5622 {
5623 unthrottle_cfs_rq(cfs_rq);
5624 }
5625 #endif
5626
5627 static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
5628 {
5629 lockdep_assert_rq_held(rq_of(cfs_rq));
5630
5631 if (SCHED_WARN_ON(!cfs_rq_throttled(cfs_rq) ||
5632 cfs_rq->runtime_remaining <= 0))
5633 return;
5634
5635 __unthrottle_cfs_rq_async(cfs_rq);
5636 }
5637
5638 static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
5639 {
5640 struct cfs_rq *local_unthrottle = NULL;
5641 int this_cpu = smp_processor_id();
5642 u64 runtime, remaining = 1;
5643 bool throttled = false;
5644 struct cfs_rq *cfs_rq;
5645 struct rq_flags rf;
5646 struct rq *rq;
5647
5648 rcu_read_lock();
5649 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
5650 throttled_list) {
5651 rq = rq_of(cfs_rq);
5652
5653 if (!remaining) {
5654 throttled = true;
5655 break;
5656 }
5657
5658 rq_lock_irqsave(rq, &rf);
5659 if (!cfs_rq_throttled(cfs_rq))
5660 goto next;
5661
5662 #ifdef CONFIG_SMP
5663 /* Already queued for async unthrottle */
5664 if (!list_empty(&cfs_rq->throttled_csd_list))
5665 goto next;
5666 #endif
5667
5668 /* By the above checks, this should never be true */
5669 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
5670
5671 raw_spin_lock(&cfs_b->lock);
5672 runtime = -cfs_rq->runtime_remaining + 1;
5673 if (runtime > cfs_b->runtime)
5674 runtime = cfs_b->runtime;
5675 cfs_b->runtime -= runtime;
5676 remaining = cfs_b->runtime;
5677 raw_spin_unlock(&cfs_b->lock);
5678
5679 cfs_rq->runtime_remaining += runtime;
5680
5681 /* we check whether we're throttled above */
5682 if (cfs_rq->runtime_remaining > 0) {
5683 if (cpu_of(rq) != this_cpu ||
5684 SCHED_WARN_ON(local_unthrottle))
5685 unthrottle_cfs_rq_async(cfs_rq);
5686 else
5687 local_unthrottle = cfs_rq;
5688 } else {
5689 throttled = true;
5690 }
5691
5692 next:
5693 rq_unlock_irqrestore(rq, &rf);
5694 }
5695 rcu_read_unlock();
5696
5697 if (local_unthrottle) {
5698 rq = cpu_rq(this_cpu);
5699 rq_lock_irqsave(rq, &rf);
5700 if (cfs_rq_throttled(local_unthrottle))
5701 unthrottle_cfs_rq(local_unthrottle);
5702 rq_unlock_irqrestore(rq, &rf);
5703 }
5704
5705 return throttled;
5706 }
5707
5708 /*
5709 * Responsible for refilling a task_group's bandwidth and unthrottling its
5710 * cfs_rqs as appropriate. If there has been no activity within the last
5711 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
5712 * used to track this state.
5713 */
5714 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
5715 {
5716 int throttled;
5717
5718 /* no need to continue the timer with no bandwidth constraint */
5719 if (cfs_b->quota == RUNTIME_INF)
5720 goto out_deactivate;
5721
5722 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
5723 cfs_b->nr_periods += overrun;
5724
5725 /* Refill extra burst quota even if cfs_b->idle */
5726 __refill_cfs_bandwidth_runtime(cfs_b);
5727
5728 /*
5729 * idle depends on !throttled (for the case of a large deficit), and if
5730 * we're going inactive then everything else can be deferred
5731 */
5732 if (cfs_b->idle && !throttled)
5733 goto out_deactivate;
5734
5735 if (!throttled) {
5736 /* mark as potentially idle for the upcoming period */
5737 cfs_b->idle = 1;
5738 return 0;
5739 }
5740
5741 /* account preceding periods in which throttling occurred */
5742 cfs_b->nr_throttled += overrun;
5743
5744 /*
5745 * This check is repeated as we release cfs_b->lock while we unthrottle.
5746 */
5747 while (throttled && cfs_b->runtime > 0) {
5748 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5749 /* we can't nest cfs_b->lock while distributing bandwidth */
5750 throttled = distribute_cfs_runtime(cfs_b);
5751 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5752 }
5753
5754 /*
5755 * While we are ensured activity in the period following an
5756 * unthrottle, this also covers the case in which the new bandwidth is
5757 * insufficient to cover the existing bandwidth deficit. (Forcing the
5758 * timer to remain active while there are any throttled entities.)
5759 */
5760 cfs_b->idle = 0;
5761
5762 return 0;
5763
5764 out_deactivate:
5765 return 1;
5766 }
5767
5768 /* a cfs_rq won't donate quota below this amount */
5769 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
5770 /* minimum remaining period time to redistribute slack quota */
5771 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
5772 /* how long we wait to gather additional slack before distributing */
5773 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
5774
5775 /*
5776 * Are we near the end of the current quota period?
5777 *
5778 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
5779 * hrtimer base being cleared by hrtimer_start. In the case of
5780 * migrate_hrtimers, base is never cleared, so we are fine.
5781 */
5782 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
5783 {
5784 struct hrtimer *refresh_timer = &cfs_b->period_timer;
5785 s64 remaining;
5786
5787 /* if the call-back is running a quota refresh is already occurring */
5788 if (hrtimer_callback_running(refresh_timer))
5789 return 1;
5790
5791 /* is a quota refresh about to occur? */
5792 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
5793 if (remaining < (s64)min_expire)
5794 return 1;
5795
5796 return 0;
5797 }
5798
5799 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
5800 {
5801 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
5802
5803 /* if there's a quota refresh soon don't bother with slack */
5804 if (runtime_refresh_within(cfs_b, min_left))
5805 return;
5806
5807 /* don't push forwards an existing deferred unthrottle */
5808 if (cfs_b->slack_started)
5809 return;
5810 cfs_b->slack_started = true;
5811
5812 hrtimer_start(&cfs_b->slack_timer,
5813 ns_to_ktime(cfs_bandwidth_slack_period),
5814 HRTIMER_MODE_REL);
5815 }
5816
5817 /* we know any runtime found here is valid as update_curr() precedes return */
5818 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5819 {
5820 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5821 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
5822
5823 if (slack_runtime <= 0)
5824 return;
5825
5826 raw_spin_lock(&cfs_b->lock);
5827 if (cfs_b->quota != RUNTIME_INF) {
5828 cfs_b->runtime += slack_runtime;
5829
5830 /* we are under rq->lock, defer unthrottling using a timer */
5831 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
5832 !list_empty(&cfs_b->throttled_cfs_rq))
5833 start_cfs_slack_bandwidth(cfs_b);
5834 }
5835 raw_spin_unlock(&cfs_b->lock);
5836
5837 /* even if it's not valid for return we don't want to try again */
5838 cfs_rq->runtime_remaining -= slack_runtime;
5839 }
5840
5841 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5842 {
5843 if (!cfs_bandwidth_used())
5844 return;
5845
5846 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
5847 return;
5848
5849 __return_cfs_rq_runtime(cfs_rq);
5850 }
5851
5852 /*
5853 * This is done with a timer (instead of inline with bandwidth return) since
5854 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
5855 */
5856 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
5857 {
5858 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
5859 unsigned long flags;
5860
5861 /* confirm we're still not at a refresh boundary */
5862 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5863 cfs_b->slack_started = false;
5864
5865 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
5866 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5867 return;
5868 }
5869
5870 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
5871 runtime = cfs_b->runtime;
5872
5873 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
5874
5875 if (!runtime)
5876 return;
5877
5878 distribute_cfs_runtime(cfs_b);
5879 }
5880
5881 /*
5882 * When a group wakes up we want to make sure that its quota is not already
5883 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
5884 * runtime as update_curr() throttling can not trigger until it's on-rq.
5885 */
5886 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
5887 {
5888 if (!cfs_bandwidth_used())
5889 return;
5890
5891 /* an active group must be handled by the update_curr()->put() path */
5892 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
5893 return;
5894
5895 /* ensure the group is not already throttled */
5896 if (cfs_rq_throttled(cfs_rq))
5897 return;
5898
5899 /* update runtime allocation */
5900 account_cfs_rq_runtime(cfs_rq, 0);
5901 if (cfs_rq->runtime_remaining <= 0)
5902 throttle_cfs_rq(cfs_rq);
5903 }
5904
5905 static void sync_throttle(struct task_group *tg, int cpu)
5906 {
5907 struct cfs_rq *pcfs_rq, *cfs_rq;
5908
5909 if (!cfs_bandwidth_used())
5910 return;
5911
5912 if (!tg->parent)
5913 return;
5914
5915 cfs_rq = tg->cfs_rq[cpu];
5916 pcfs_rq = tg->parent->cfs_rq[cpu];
5917
5918 cfs_rq->throttle_count = pcfs_rq->throttle_count;
5919 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
5920 }
5921
5922 /* conditionally throttle active cfs_rq's from put_prev_entity() */
5923 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5924 {
5925 if (!cfs_bandwidth_used())
5926 return false;
5927
5928 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
5929 return false;
5930
5931 /*
5932 * it's possible for a throttled entity to be forced into a running
5933 * state (e.g. set_curr_task), in this case we're finished.
5934 */
5935 if (cfs_rq_throttled(cfs_rq))
5936 return true;
5937
5938 return throttle_cfs_rq(cfs_rq);
5939 }
5940
5941 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
5942 {
5943 struct cfs_bandwidth *cfs_b =
5944 container_of(timer, struct cfs_bandwidth, slack_timer);
5945
5946 do_sched_cfs_slack_timer(cfs_b);
5947
5948 return HRTIMER_NORESTART;
5949 }
5950
5951 extern const u64 max_cfs_quota_period;
5952
5953 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
5954 {
5955 struct cfs_bandwidth *cfs_b =
5956 container_of(timer, struct cfs_bandwidth, period_timer);
5957 unsigned long flags;
5958 int overrun;
5959 int idle = 0;
5960 int count = 0;
5961
5962 raw_spin_lock_irqsave(&cfs_b->lock, flags);
5963 for (;;) {
5964 overrun = hrtimer_forward_now(timer, cfs_b->period);
5965 if (!overrun)
5966 break;
5967
5968 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
5969
5970 if (++count > 3) {
5971 u64 new, old = ktime_to_ns(cfs_b->period);
5972
5973 /*
5974 * Grow period by a factor of 2 to avoid losing precision.
5975 * Precision loss in the quota/period ratio can cause __cfs_schedulable
5976 * to fail.
5977 */
5978 new = old * 2;
5979 if (new < max_cfs_quota_period) {
5980 cfs_b->period = ns_to_ktime(new);
5981 cfs_b->quota *= 2;
5982 cfs_b->burst *= 2;
5983
5984 pr_warn_ratelimited(
5985 "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
5986 smp_processor_id(),
5987 div_u64(new, NSEC_PER_USEC),
5988 div_u64(cfs_b->quota, NSEC_PER_USEC));
5989 } else {
5990 pr_warn_ratelimited(
5991 "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
5992 smp_processor_id(),
5993 div_u64(old, NSEC_PER_USEC),
5994 div_u64(cfs_b->quota, NSEC_PER_USEC));
5995 }
5996
5997 /* reset count so we don't come right back in here */
5998 count = 0;
5999 }
6000 }
6001 if (idle)
6002 cfs_b->period_active = 0;
6003 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6004
6005 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
6006 }
6007
6008 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
6009 {
6010 raw_spin_lock_init(&cfs_b->lock);
6011 cfs_b->runtime = 0;
6012 cfs_b->quota = RUNTIME_INF;
6013 cfs_b->period = ns_to_ktime(default_cfs_period());
6014 cfs_b->burst = 0;
6015
6016 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
6017 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
6018 cfs_b->period_timer.function = sched_cfs_period_timer;
6019 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6020 cfs_b->slack_timer.function = sched_cfs_slack_timer;
6021 cfs_b->slack_started = false;
6022 }
6023
6024 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
6025 {
6026 cfs_rq->runtime_enabled = 0;
6027 INIT_LIST_HEAD(&cfs_rq->throttled_list);
6028 #ifdef CONFIG_SMP
6029 INIT_LIST_HEAD(&cfs_rq->throttled_csd_list);
6030 #endif
6031 }
6032
6033 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
6034 {
6035 lockdep_assert_held(&cfs_b->lock);
6036
6037 if (cfs_b->period_active)
6038 return;
6039
6040 cfs_b->period_active = 1;
6041 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
6042 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
6043 }
6044
6045 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
6046 {
6047 int __maybe_unused i;
6048
6049 /* init_cfs_bandwidth() was not called */
6050 if (!cfs_b->throttled_cfs_rq.next)
6051 return;
6052
6053 hrtimer_cancel(&cfs_b->period_timer);
6054 hrtimer_cancel(&cfs_b->slack_timer);
6055
6056 /*
6057 * It is possible that we still have some cfs_rq's pending on a CSD
6058 * list, though this race is very rare. In order for this to occur, we
6059 * must have raced with the last task leaving the group while there
6060 * exist throttled cfs_rq(s), and the period_timer must have queued the
6061 * CSD item but the remote cpu has not yet processed it. To handle this,
6062 * we can simply flush all pending CSD work inline here. We're
6063 * guaranteed at this point that no additional cfs_rq of this group can
6064 * join a CSD list.
6065 */
6066 #ifdef CONFIG_SMP
6067 for_each_possible_cpu(i) {
6068 struct rq *rq = cpu_rq(i);
6069 unsigned long flags;
6070
6071 if (list_empty(&rq->cfsb_csd_list))
6072 continue;
6073
6074 local_irq_save(flags);
6075 __cfsb_csd_unthrottle(rq);
6076 local_irq_restore(flags);
6077 }
6078 #endif
6079 }
6080
6081 /*
6082 * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
6083 *
6084 * The race is harmless, since modifying bandwidth settings of unhooked group
6085 * bits doesn't do much.
6086 */
6087
6088 /* cpu online callback */
6089 static void __maybe_unused update_runtime_enabled(struct rq *rq)
6090 {
6091 struct task_group *tg;
6092
6093 lockdep_assert_rq_held(rq);
6094
6095 rcu_read_lock();
6096 list_for_each_entry_rcu(tg, &task_groups, list) {
6097 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
6098 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6099
6100 raw_spin_lock(&cfs_b->lock);
6101 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
6102 raw_spin_unlock(&cfs_b->lock);
6103 }
6104 rcu_read_unlock();
6105 }
6106
6107 /* cpu offline callback */
6108 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
6109 {
6110 struct task_group *tg;
6111
6112 lockdep_assert_rq_held(rq);
6113
6114 rcu_read_lock();
6115 list_for_each_entry_rcu(tg, &task_groups, list) {
6116 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6117
6118 if (!cfs_rq->runtime_enabled)
6119 continue;
6120
6121 /*
6122 * clock_task is not advancing so we just need to make sure
6123 * there's some valid quota amount
6124 */
6125 cfs_rq->runtime_remaining = 1;
6126 /*
6127 * Offline rq is schedulable till CPU is completely disabled
6128 * in take_cpu_down(), so we prevent new cfs throttling here.
6129 */
6130 cfs_rq->runtime_enabled = 0;
6131
6132 if (cfs_rq_throttled(cfs_rq))
6133 unthrottle_cfs_rq(cfs_rq);
6134 }
6135 rcu_read_unlock();
6136 }
6137
6138 #else /* CONFIG_CFS_BANDWIDTH */
6139
6140 static inline bool cfs_bandwidth_used(void)
6141 {
6142 return false;
6143 }
6144
6145 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
6146 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
6147 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
6148 static inline void sync_throttle(struct task_group *tg, int cpu) {}
6149 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
6150
6151 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
6152 {
6153 return 0;
6154 }
6155
6156 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
6157 {
6158 return 0;
6159 }
6160
6161 static inline int throttled_lb_pair(struct task_group *tg,
6162 int src_cpu, int dest_cpu)
6163 {
6164 return 0;
6165 }
6166
6167 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
6168
6169 #ifdef CONFIG_FAIR_GROUP_SCHED
6170 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
6171 #endif
6172
6173 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
6174 {
6175 return NULL;
6176 }
6177 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
6178 static inline void update_runtime_enabled(struct rq *rq) {}
6179 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
6180
6181 #endif /* CONFIG_CFS_BANDWIDTH */
6182
6183 /**************************************************
6184 * CFS operations on tasks:
6185 */
6186
6187 #ifdef CONFIG_SCHED_HRTICK
6188 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
6189 {
6190 struct sched_entity *se = &p->se;
6191 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6192
6193 SCHED_WARN_ON(task_rq(p) != rq);
6194
6195 if (rq->cfs.h_nr_running > 1) {
6196 u64 slice = sched_slice(cfs_rq, se);
6197 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
6198 s64 delta = slice - ran;
6199
6200 if (delta < 0) {
6201 if (task_current(rq, p))
6202 resched_curr(rq);
6203 return;
6204 }
6205 hrtick_start(rq, delta);
6206 }
6207 }
6208
6209 /*
6210 * called from enqueue/dequeue and updates the hrtick when the
6211 * current task is from our class and nr_running is low enough
6212 * to matter.
6213 */
6214 static void hrtick_update(struct rq *rq)
6215 {
6216 struct task_struct *curr = rq->curr;
6217
6218 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
6219 return;
6220
6221 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
6222 hrtick_start_fair(rq, curr);
6223 }
6224 #else /* !CONFIG_SCHED_HRTICK */
6225 static inline void
6226 hrtick_start_fair(struct rq *rq, struct task_struct *p)
6227 {
6228 }
6229
6230 static inline void hrtick_update(struct rq *rq)
6231 {
6232 }
6233 #endif
6234
6235 #ifdef CONFIG_SMP
6236 static inline bool cpu_overutilized(int cpu)
6237 {
6238 unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
6239 unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
6240
6241 /* Return true only if the utilization doesn't fit CPU's capacity */
6242 return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
6243 }
6244
6245 static inline void update_overutilized_status(struct rq *rq)
6246 {
6247 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
6248 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
6249 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
6250 }
6251 }
6252 #else
6253 static inline void update_overutilized_status(struct rq *rq) { }
6254 #endif
6255
6256 /* Runqueue only has SCHED_IDLE tasks enqueued */
6257 static int sched_idle_rq(struct rq *rq)
6258 {
6259 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
6260 rq->nr_running);
6261 }
6262
6263 /*
6264 * Returns true if cfs_rq only has SCHED_IDLE entities enqueued. Note the use
6265 * of idle_nr_running, which does not consider idle descendants of normal
6266 * entities.
6267 */
6268 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq)
6269 {
6270 return cfs_rq->nr_running &&
6271 cfs_rq->nr_running == cfs_rq->idle_nr_running;
6272 }
6273
6274 #ifdef CONFIG_SMP
6275 static int sched_idle_cpu(int cpu)
6276 {
6277 return sched_idle_rq(cpu_rq(cpu));
6278 }
6279 #endif
6280
6281 /*
6282 * The enqueue_task method is called before nr_running is
6283 * increased. Here we update the fair scheduling stats and
6284 * then put the task into the rbtree:
6285 */
6286 static void
6287 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
6288 {
6289 struct cfs_rq *cfs_rq;
6290 struct sched_entity *se = &p->se;
6291 int idle_h_nr_running = task_has_idle_policy(p);
6292 int task_new = !(flags & ENQUEUE_WAKEUP);
6293
6294 /*
6295 * The code below (indirectly) updates schedutil which looks at
6296 * the cfs_rq utilization to select a frequency.
6297 * Let's add the task's estimated utilization to the cfs_rq's
6298 * estimated utilization, before we update schedutil.
6299 */
6300 util_est_enqueue(&rq->cfs, p);
6301
6302 /*
6303 * If in_iowait is set, the code below may not trigger any cpufreq
6304 * utilization updates, so do it here explicitly with the IOWAIT flag
6305 * passed.
6306 */
6307 if (p->in_iowait)
6308 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
6309
6310 for_each_sched_entity(se) {
6311 if (se->on_rq)
6312 break;
6313 cfs_rq = cfs_rq_of(se);
6314 enqueue_entity(cfs_rq, se, flags);
6315
6316 cfs_rq->h_nr_running++;
6317 cfs_rq->idle_h_nr_running += idle_h_nr_running;
6318
6319 if (cfs_rq_is_idle(cfs_rq))
6320 idle_h_nr_running = 1;
6321
6322 /* end evaluation on encountering a throttled cfs_rq */
6323 if (cfs_rq_throttled(cfs_rq))
6324 goto enqueue_throttle;
6325
6326 flags = ENQUEUE_WAKEUP;
6327 }
6328
6329 for_each_sched_entity(se) {
6330 cfs_rq = cfs_rq_of(se);
6331
6332 update_load_avg(cfs_rq, se, UPDATE_TG);
6333 se_update_runnable(se);
6334 update_cfs_group(se);
6335
6336 cfs_rq->h_nr_running++;
6337 cfs_rq->idle_h_nr_running += idle_h_nr_running;
6338
6339 if (cfs_rq_is_idle(cfs_rq))
6340 idle_h_nr_running = 1;
6341
6342 /* end evaluation on encountering a throttled cfs_rq */
6343 if (cfs_rq_throttled(cfs_rq))
6344 goto enqueue_throttle;
6345 }
6346
6347 /* At this point se is NULL and we are at root level*/
6348 add_nr_running(rq, 1);
6349
6350 /*
6351 * Since new tasks are assigned an initial util_avg equal to
6352 * half of the spare capacity of their CPU, tiny tasks have the
6353 * ability to cross the overutilized threshold, which will
6354 * result in the load balancer ruining all the task placement
6355 * done by EAS. As a way to mitigate that effect, do not account
6356 * for the first enqueue operation of new tasks during the
6357 * overutilized flag detection.
6358 *
6359 * A better way of solving this problem would be to wait for
6360 * the PELT signals of tasks to converge before taking them
6361 * into account, but that is not straightforward to implement,
6362 * and the following generally works well enough in practice.
6363 */
6364 if (!task_new)
6365 update_overutilized_status(rq);
6366
6367 enqueue_throttle:
6368 assert_list_leaf_cfs_rq(rq);
6369
6370 hrtick_update(rq);
6371 }
6372
6373 static void set_next_buddy(struct sched_entity *se);
6374
6375 /*
6376 * The dequeue_task method is called before nr_running is
6377 * decreased. We remove the task from the rbtree and
6378 * update the fair scheduling stats:
6379 */
6380 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
6381 {
6382 struct cfs_rq *cfs_rq;
6383 struct sched_entity *se = &p->se;
6384 int task_sleep = flags & DEQUEUE_SLEEP;
6385 int idle_h_nr_running = task_has_idle_policy(p);
6386 bool was_sched_idle = sched_idle_rq(rq);
6387
6388 util_est_dequeue(&rq->cfs, p);
6389
6390 for_each_sched_entity(se) {
6391 cfs_rq = cfs_rq_of(se);
6392 dequeue_entity(cfs_rq, se, flags);
6393
6394 cfs_rq->h_nr_running--;
6395 cfs_rq->idle_h_nr_running -= idle_h_nr_running;
6396
6397 if (cfs_rq_is_idle(cfs_rq))
6398 idle_h_nr_running = 1;
6399
6400 /* end evaluation on encountering a throttled cfs_rq */
6401 if (cfs_rq_throttled(cfs_rq))
6402 goto dequeue_throttle;
6403
6404 /* Don't dequeue parent if it has other entities besides us */
6405 if (cfs_rq->load.weight) {
6406 /* Avoid re-evaluating load for this entity: */
6407 se = parent_entity(se);
6408 /*
6409 * Bias pick_next to pick a task from this cfs_rq, as
6410 * p is sleeping when it is within its sched_slice.
6411 */
6412 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
6413 set_next_buddy(se);
6414 break;
6415 }
6416 flags |= DEQUEUE_SLEEP;
6417 }
6418
6419 for_each_sched_entity(se) {
6420 cfs_rq = cfs_rq_of(se);
6421
6422 update_load_avg(cfs_rq, se, UPDATE_TG);
6423 se_update_runnable(se);
6424 update_cfs_group(se);
6425
6426 cfs_rq->h_nr_running--;
6427 cfs_rq->idle_h_nr_running -= idle_h_nr_running;
6428
6429 if (cfs_rq_is_idle(cfs_rq))
6430 idle_h_nr_running = 1;
6431
6432 /* end evaluation on encountering a throttled cfs_rq */
6433 if (cfs_rq_throttled(cfs_rq))
6434 goto dequeue_throttle;
6435
6436 }
6437
6438 /* At this point se is NULL and we are at root level*/
6439 sub_nr_running(rq, 1);
6440
6441 /* balance early to pull high priority tasks */
6442 if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
6443 rq->next_balance = jiffies;
6444
6445 dequeue_throttle:
6446 util_est_update(&rq->cfs, p, task_sleep);
6447 hrtick_update(rq);
6448 }
6449
6450 #ifdef CONFIG_SMP
6451
6452 /* Working cpumask for: load_balance, load_balance_newidle. */
6453 static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
6454 static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
6455
6456 #ifdef CONFIG_NO_HZ_COMMON
6457
6458 static struct {
6459 cpumask_var_t idle_cpus_mask;
6460 atomic_t nr_cpus;
6461 int has_blocked; /* Idle CPUS has blocked load */
6462 int needs_update; /* Newly idle CPUs need their next_balance collated */
6463 unsigned long next_balance; /* in jiffy units */
6464 unsigned long next_blocked; /* Next update of blocked load in jiffies */
6465 } nohz ____cacheline_aligned;
6466
6467 #endif /* CONFIG_NO_HZ_COMMON */
6468
6469 static unsigned long cpu_load(struct rq *rq)
6470 {
6471 return cfs_rq_load_avg(&rq->cfs);
6472 }
6473
6474 /*
6475 * cpu_load_without - compute CPU load without any contributions from *p
6476 * @cpu: the CPU which load is requested
6477 * @p: the task which load should be discounted
6478 *
6479 * The load of a CPU is defined by the load of tasks currently enqueued on that
6480 * CPU as well as tasks which are currently sleeping after an execution on that
6481 * CPU.
6482 *
6483 * This method returns the load of the specified CPU by discounting the load of
6484 * the specified task, whenever the task is currently contributing to the CPU
6485 * load.
6486 */
6487 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
6488 {
6489 struct cfs_rq *cfs_rq;
6490 unsigned int load;
6491
6492 /* Task has no contribution or is new */
6493 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6494 return cpu_load(rq);
6495
6496 cfs_rq = &rq->cfs;
6497 load = READ_ONCE(cfs_rq->avg.load_avg);
6498
6499 /* Discount task's util from CPU's util */
6500 lsub_positive(&load, task_h_load(p));
6501
6502 return load;
6503 }
6504
6505 static unsigned long cpu_runnable(struct rq *rq)
6506 {
6507 return cfs_rq_runnable_avg(&rq->cfs);
6508 }
6509
6510 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
6511 {
6512 struct cfs_rq *cfs_rq;
6513 unsigned int runnable;
6514
6515 /* Task has no contribution or is new */
6516 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6517 return cpu_runnable(rq);
6518
6519 cfs_rq = &rq->cfs;
6520 runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
6521
6522 /* Discount task's runnable from CPU's runnable */
6523 lsub_positive(&runnable, p->se.avg.runnable_avg);
6524
6525 return runnable;
6526 }
6527
6528 static unsigned long capacity_of(int cpu)
6529 {
6530 return cpu_rq(cpu)->cpu_capacity;
6531 }
6532
6533 static void record_wakee(struct task_struct *p)
6534 {
6535 /*
6536 * Only decay a single time; tasks that have less then 1 wakeup per
6537 * jiffy will not have built up many flips.
6538 */
6539 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
6540 current->wakee_flips >>= 1;
6541 current->wakee_flip_decay_ts = jiffies;
6542 }
6543
6544 if (current->last_wakee != p) {
6545 current->last_wakee = p;
6546 current->wakee_flips++;
6547 }
6548 }
6549
6550 /*
6551 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
6552 *
6553 * A waker of many should wake a different task than the one last awakened
6554 * at a frequency roughly N times higher than one of its wakees.
6555 *
6556 * In order to determine whether we should let the load spread vs consolidating
6557 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
6558 * partner, and a factor of lls_size higher frequency in the other.
6559 *
6560 * With both conditions met, we can be relatively sure that the relationship is
6561 * non-monogamous, with partner count exceeding socket size.
6562 *
6563 * Waker/wakee being client/server, worker/dispatcher, interrupt source or
6564 * whatever is irrelevant, spread criteria is apparent partner count exceeds
6565 * socket size.
6566 */
6567 static int wake_wide(struct task_struct *p)
6568 {
6569 unsigned int master = current->wakee_flips;
6570 unsigned int slave = p->wakee_flips;
6571 int factor = __this_cpu_read(sd_llc_size);
6572
6573 if (master < slave)
6574 swap(master, slave);
6575 if (slave < factor || master < slave * factor)
6576 return 0;
6577 return 1;
6578 }
6579
6580 /*
6581 * The purpose of wake_affine() is to quickly determine on which CPU we can run
6582 * soonest. For the purpose of speed we only consider the waking and previous
6583 * CPU.
6584 *
6585 * wake_affine_idle() - only considers 'now', it check if the waking CPU is
6586 * cache-affine and is (or will be) idle.
6587 *
6588 * wake_affine_weight() - considers the weight to reflect the average
6589 * scheduling latency of the CPUs. This seems to work
6590 * for the overloaded case.
6591 */
6592 static int
6593 wake_affine_idle(int this_cpu, int prev_cpu, int sync)
6594 {
6595 /*
6596 * If this_cpu is idle, it implies the wakeup is from interrupt
6597 * context. Only allow the move if cache is shared. Otherwise an
6598 * interrupt intensive workload could force all tasks onto one
6599 * node depending on the IO topology or IRQ affinity settings.
6600 *
6601 * If the prev_cpu is idle and cache affine then avoid a migration.
6602 * There is no guarantee that the cache hot data from an interrupt
6603 * is more important than cache hot data on the prev_cpu and from
6604 * a cpufreq perspective, it's better to have higher utilisation
6605 * on one CPU.
6606 */
6607 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
6608 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
6609
6610 if (sync && cpu_rq(this_cpu)->nr_running == 1)
6611 return this_cpu;
6612
6613 if (available_idle_cpu(prev_cpu))
6614 return prev_cpu;
6615
6616 return nr_cpumask_bits;
6617 }
6618
6619 static int
6620 wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
6621 int this_cpu, int prev_cpu, int sync)
6622 {
6623 s64 this_eff_load, prev_eff_load;
6624 unsigned long task_load;
6625
6626 this_eff_load = cpu_load(cpu_rq(this_cpu));
6627
6628 if (sync) {
6629 unsigned long current_load = task_h_load(current);
6630
6631 if (current_load > this_eff_load)
6632 return this_cpu;
6633
6634 this_eff_load -= current_load;
6635 }
6636
6637 task_load = task_h_load(p);
6638
6639 this_eff_load += task_load;
6640 if (sched_feat(WA_BIAS))
6641 this_eff_load *= 100;
6642 this_eff_load *= capacity_of(prev_cpu);
6643
6644 prev_eff_load = cpu_load(cpu_rq(prev_cpu));
6645 prev_eff_load -= task_load;
6646 if (sched_feat(WA_BIAS))
6647 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
6648 prev_eff_load *= capacity_of(this_cpu);
6649
6650 /*
6651 * If sync, adjust the weight of prev_eff_load such that if
6652 * prev_eff == this_eff that select_idle_sibling() will consider
6653 * stacking the wakee on top of the waker if no other CPU is
6654 * idle.
6655 */
6656 if (sync)
6657 prev_eff_load += 1;
6658
6659 return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
6660 }
6661
6662 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
6663 int this_cpu, int prev_cpu, int sync)
6664 {
6665 int target = nr_cpumask_bits;
6666
6667 if (sched_feat(WA_IDLE))
6668 target = wake_affine_idle(this_cpu, prev_cpu, sync);
6669
6670 if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
6671 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
6672
6673 schedstat_inc(p->stats.nr_wakeups_affine_attempts);
6674 if (target == nr_cpumask_bits)
6675 return prev_cpu;
6676
6677 schedstat_inc(sd->ttwu_move_affine);
6678 schedstat_inc(p->stats.nr_wakeups_affine);
6679 return target;
6680 }
6681
6682 static struct sched_group *
6683 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
6684
6685 /*
6686 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
6687 */
6688 static int
6689 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
6690 {
6691 unsigned long load, min_load = ULONG_MAX;
6692 unsigned int min_exit_latency = UINT_MAX;
6693 u64 latest_idle_timestamp = 0;
6694 int least_loaded_cpu = this_cpu;
6695 int shallowest_idle_cpu = -1;
6696 int i;
6697
6698 /* Check if we have any choice: */
6699 if (group->group_weight == 1)
6700 return cpumask_first(sched_group_span(group));
6701
6702 /* Traverse only the allowed CPUs */
6703 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
6704 struct rq *rq = cpu_rq(i);
6705
6706 if (!sched_core_cookie_match(rq, p))
6707 continue;
6708
6709 if (sched_idle_cpu(i))
6710 return i;
6711
6712 if (available_idle_cpu(i)) {
6713 struct cpuidle_state *idle = idle_get_state(rq);
6714 if (idle && idle->exit_latency < min_exit_latency) {
6715 /*
6716 * We give priority to a CPU whose idle state
6717 * has the smallest exit latency irrespective
6718 * of any idle timestamp.
6719 */
6720 min_exit_latency = idle->exit_latency;
6721 latest_idle_timestamp = rq->idle_stamp;
6722 shallowest_idle_cpu = i;
6723 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
6724 rq->idle_stamp > latest_idle_timestamp) {
6725 /*
6726 * If equal or no active idle state, then
6727 * the most recently idled CPU might have
6728 * a warmer cache.
6729 */
6730 latest_idle_timestamp = rq->idle_stamp;
6731 shallowest_idle_cpu = i;
6732 }
6733 } else if (shallowest_idle_cpu == -1) {
6734 load = cpu_load(cpu_rq(i));
6735 if (load < min_load) {
6736 min_load = load;
6737 least_loaded_cpu = i;
6738 }
6739 }
6740 }
6741
6742 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
6743 }
6744
6745 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
6746 int cpu, int prev_cpu, int sd_flag)
6747 {
6748 int new_cpu = cpu;
6749
6750 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
6751 return prev_cpu;
6752
6753 /*
6754 * We need task's util for cpu_util_without, sync it up to
6755 * prev_cpu's last_update_time.
6756 */
6757 if (!(sd_flag & SD_BALANCE_FORK))
6758 sync_entity_load_avg(&p->se);
6759
6760 while (sd) {
6761 struct sched_group *group;
6762 struct sched_domain *tmp;
6763 int weight;
6764
6765 if (!(sd->flags & sd_flag)) {
6766 sd = sd->child;
6767 continue;
6768 }
6769
6770 group = find_idlest_group(sd, p, cpu);
6771 if (!group) {
6772 sd = sd->child;
6773 continue;
6774 }
6775
6776 new_cpu = find_idlest_group_cpu(group, p, cpu);
6777 if (new_cpu == cpu) {
6778 /* Now try balancing at a lower domain level of 'cpu': */
6779 sd = sd->child;
6780 continue;
6781 }
6782
6783 /* Now try balancing at a lower domain level of 'new_cpu': */
6784 cpu = new_cpu;
6785 weight = sd->span_weight;
6786 sd = NULL;
6787 for_each_domain(cpu, tmp) {
6788 if (weight <= tmp->span_weight)
6789 break;
6790 if (tmp->flags & sd_flag)
6791 sd = tmp;
6792 }
6793 }
6794
6795 return new_cpu;
6796 }
6797
6798 static inline int __select_idle_cpu(int cpu, struct task_struct *p)
6799 {
6800 if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) &&
6801 sched_cpu_cookie_match(cpu_rq(cpu), p))
6802 return cpu;
6803
6804 return -1;
6805 }
6806
6807 #ifdef CONFIG_SCHED_SMT
6808 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
6809 EXPORT_SYMBOL_GPL(sched_smt_present);
6810
6811 static inline void set_idle_cores(int cpu, int val)
6812 {
6813 struct sched_domain_shared *sds;
6814
6815 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6816 if (sds)
6817 WRITE_ONCE(sds->has_idle_cores, val);
6818 }
6819
6820 static inline bool test_idle_cores(int cpu)
6821 {
6822 struct sched_domain_shared *sds;
6823
6824 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
6825 if (sds)
6826 return READ_ONCE(sds->has_idle_cores);
6827
6828 return false;
6829 }
6830
6831 /*
6832 * Scans the local SMT mask to see if the entire core is idle, and records this
6833 * information in sd_llc_shared->has_idle_cores.
6834 *
6835 * Since SMT siblings share all cache levels, inspecting this limited remote
6836 * state should be fairly cheap.
6837 */
6838 void __update_idle_core(struct rq *rq)
6839 {
6840 int core = cpu_of(rq);
6841 int cpu;
6842
6843 rcu_read_lock();
6844 if (test_idle_cores(core))
6845 goto unlock;
6846
6847 for_each_cpu(cpu, cpu_smt_mask(core)) {
6848 if (cpu == core)
6849 continue;
6850
6851 if (!available_idle_cpu(cpu))
6852 goto unlock;
6853 }
6854
6855 set_idle_cores(core, 1);
6856 unlock:
6857 rcu_read_unlock();
6858 }
6859
6860 /*
6861 * Scan the entire LLC domain for idle cores; this dynamically switches off if
6862 * there are no idle cores left in the system; tracked through
6863 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
6864 */
6865 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
6866 {
6867 bool idle = true;
6868 int cpu;
6869
6870 for_each_cpu(cpu, cpu_smt_mask(core)) {
6871 if (!available_idle_cpu(cpu)) {
6872 idle = false;
6873 if (*idle_cpu == -1) {
6874 if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
6875 *idle_cpu = cpu;
6876 break;
6877 }
6878 continue;
6879 }
6880 break;
6881 }
6882 if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
6883 *idle_cpu = cpu;
6884 }
6885
6886 if (idle)
6887 return core;
6888
6889 cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
6890 return -1;
6891 }
6892
6893 /*
6894 * Scan the local SMT mask for idle CPUs.
6895 */
6896 static int select_idle_smt(struct task_struct *p, int target)
6897 {
6898 int cpu;
6899
6900 for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
6901 if (cpu == target)
6902 continue;
6903 if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
6904 return cpu;
6905 }
6906
6907 return -1;
6908 }
6909
6910 #else /* CONFIG_SCHED_SMT */
6911
6912 static inline void set_idle_cores(int cpu, int val)
6913 {
6914 }
6915
6916 static inline bool test_idle_cores(int cpu)
6917 {
6918 return false;
6919 }
6920
6921 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
6922 {
6923 return __select_idle_cpu(core, p);
6924 }
6925
6926 static inline int select_idle_smt(struct task_struct *p, int target)
6927 {
6928 return -1;
6929 }
6930
6931 #endif /* CONFIG_SCHED_SMT */
6932
6933 /*
6934 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
6935 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
6936 * average idle time for this rq (as found in rq->avg_idle).
6937 */
6938 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
6939 {
6940 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
6941 int i, cpu, idle_cpu = -1, nr = INT_MAX;
6942 struct sched_domain_shared *sd_share;
6943 struct rq *this_rq = this_rq();
6944 int this = smp_processor_id();
6945 struct sched_domain *this_sd = NULL;
6946 u64 time = 0;
6947
6948 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6949
6950 if (sched_feat(SIS_PROP) && !has_idle_core) {
6951 u64 avg_cost, avg_idle, span_avg;
6952 unsigned long now = jiffies;
6953
6954 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
6955 if (!this_sd)
6956 return -1;
6957
6958 /*
6959 * If we're busy, the assumption that the last idle period
6960 * predicts the future is flawed; age away the remaining
6961 * predicted idle time.
6962 */
6963 if (unlikely(this_rq->wake_stamp < now)) {
6964 while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
6965 this_rq->wake_stamp++;
6966 this_rq->wake_avg_idle >>= 1;
6967 }
6968 }
6969
6970 avg_idle = this_rq->wake_avg_idle;
6971 avg_cost = this_sd->avg_scan_cost + 1;
6972
6973 span_avg = sd->span_weight * avg_idle;
6974 if (span_avg > 4*avg_cost)
6975 nr = div_u64(span_avg, avg_cost);
6976 else
6977 nr = 4;
6978
6979 time = cpu_clock(this);
6980 }
6981
6982 if (sched_feat(SIS_UTIL)) {
6983 sd_share = rcu_dereference(per_cpu(sd_llc_shared, target));
6984 if (sd_share) {
6985 /* because !--nr is the condition to stop scan */
6986 nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
6987 /* overloaded LLC is unlikely to have idle cpu/core */
6988 if (nr == 1)
6989 return -1;
6990 }
6991 }
6992
6993 for_each_cpu_wrap(cpu, cpus, target + 1) {
6994 if (has_idle_core) {
6995 i = select_idle_core(p, cpu, cpus, &idle_cpu);
6996 if ((unsigned int)i < nr_cpumask_bits)
6997 return i;
6998
6999 } else {
7000 if (!--nr)
7001 return -1;
7002 idle_cpu = __select_idle_cpu(cpu, p);
7003 if ((unsigned int)idle_cpu < nr_cpumask_bits)
7004 break;
7005 }
7006 }
7007
7008 if (has_idle_core)
7009 set_idle_cores(target, false);
7010
7011 if (sched_feat(SIS_PROP) && this_sd && !has_idle_core) {
7012 time = cpu_clock(this) - time;
7013
7014 /*
7015 * Account for the scan cost of wakeups against the average
7016 * idle time.
7017 */
7018 this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
7019
7020 update_avg(&this_sd->avg_scan_cost, time);
7021 }
7022
7023 return idle_cpu;
7024 }
7025
7026 /*
7027 * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
7028 * the task fits. If no CPU is big enough, but there are idle ones, try to
7029 * maximize capacity.
7030 */
7031 static int
7032 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
7033 {
7034 unsigned long task_util, util_min, util_max, best_cap = 0;
7035 int fits, best_fits = 0;
7036 int cpu, best_cpu = -1;
7037 struct cpumask *cpus;
7038
7039 cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
7040 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
7041
7042 task_util = task_util_est(p);
7043 util_min = uclamp_eff_value(p, UCLAMP_MIN);
7044 util_max = uclamp_eff_value(p, UCLAMP_MAX);
7045
7046 for_each_cpu_wrap(cpu, cpus, target + 1) {
7047 unsigned long cpu_cap = capacity_of(cpu);
7048
7049 if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
7050 continue;
7051
7052 fits = util_fits_cpu(task_util, util_min, util_max, cpu);
7053
7054 /* This CPU fits with all requirements */
7055 if (fits > 0)
7056 return cpu;
7057 /*
7058 * Only the min performance hint (i.e. uclamp_min) doesn't fit.
7059 * Look for the CPU with best capacity.
7060 */
7061 else if (fits < 0)
7062 cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
7063
7064 /*
7065 * First, select CPU which fits better (-1 being better than 0).
7066 * Then, select the one with best capacity at same level.
7067 */
7068 if ((fits < best_fits) ||
7069 ((fits == best_fits) && (cpu_cap > best_cap))) {
7070 best_cap = cpu_cap;
7071 best_cpu = cpu;
7072 best_fits = fits;
7073 }
7074 }
7075
7076 return best_cpu;
7077 }
7078
7079 static inline bool asym_fits_cpu(unsigned long util,
7080 unsigned long util_min,
7081 unsigned long util_max,
7082 int cpu)
7083 {
7084 if (sched_asym_cpucap_active())
7085 /*
7086 * Return true only if the cpu fully fits the task requirements
7087 * which include the utilization and the performance hints.
7088 */
7089 return (util_fits_cpu(util, util_min, util_max, cpu) > 0);
7090
7091 return true;
7092 }
7093
7094 /*
7095 * Try and locate an idle core/thread in the LLC cache domain.
7096 */
7097 static int select_idle_sibling(struct task_struct *p, int prev, int target)
7098 {
7099 bool has_idle_core = false;
7100 struct sched_domain *sd;
7101 unsigned long task_util, util_min, util_max;
7102 int i, recent_used_cpu;
7103
7104 /*
7105 * On asymmetric system, update task utilization because we will check
7106 * that the task fits with cpu's capacity.
7107 */
7108 if (sched_asym_cpucap_active()) {
7109 sync_entity_load_avg(&p->se);
7110 task_util = task_util_est(p);
7111 util_min = uclamp_eff_value(p, UCLAMP_MIN);
7112 util_max = uclamp_eff_value(p, UCLAMP_MAX);
7113 }
7114
7115 /*
7116 * per-cpu select_rq_mask usage
7117 */
7118 lockdep_assert_irqs_disabled();
7119
7120 if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
7121 asym_fits_cpu(task_util, util_min, util_max, target))
7122 return target;
7123
7124 /*
7125 * If the previous CPU is cache affine and idle, don't be stupid:
7126 */
7127 if (prev != target && cpus_share_cache(prev, target) &&
7128 (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
7129 asym_fits_cpu(task_util, util_min, util_max, prev))
7130 return prev;
7131
7132 /*
7133 * Allow a per-cpu kthread to stack with the wakee if the
7134 * kworker thread and the tasks previous CPUs are the same.
7135 * The assumption is that the wakee queued work for the
7136 * per-cpu kthread that is now complete and the wakeup is
7137 * essentially a sync wakeup. An obvious example of this
7138 * pattern is IO completions.
7139 */
7140 if (is_per_cpu_kthread(current) &&
7141 in_task() &&
7142 prev == smp_processor_id() &&
7143 this_rq()->nr_running <= 1 &&
7144 asym_fits_cpu(task_util, util_min, util_max, prev)) {
7145 return prev;
7146 }
7147
7148 /* Check a recently used CPU as a potential idle candidate: */
7149 recent_used_cpu = p->recent_used_cpu;
7150 p->recent_used_cpu = prev;
7151 if (recent_used_cpu != prev &&
7152 recent_used_cpu != target &&
7153 cpus_share_cache(recent_used_cpu, target) &&
7154 (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
7155 cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
7156 asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
7157 return recent_used_cpu;
7158 }
7159
7160 /*
7161 * For asymmetric CPU capacity systems, our domain of interest is
7162 * sd_asym_cpucapacity rather than sd_llc.
7163 */
7164 if (sched_asym_cpucap_active()) {
7165 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
7166 /*
7167 * On an asymmetric CPU capacity system where an exclusive
7168 * cpuset defines a symmetric island (i.e. one unique
7169 * capacity_orig value through the cpuset), the key will be set
7170 * but the CPUs within that cpuset will not have a domain with
7171 * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
7172 * capacity path.
7173 */
7174 if (sd) {
7175 i = select_idle_capacity(p, sd, target);
7176 return ((unsigned)i < nr_cpumask_bits) ? i : target;
7177 }
7178 }
7179
7180 sd = rcu_dereference(per_cpu(sd_llc, target));
7181 if (!sd)
7182 return target;
7183
7184 if (sched_smt_active()) {
7185 has_idle_core = test_idle_cores(target);
7186
7187 if (!has_idle_core && cpus_share_cache(prev, target)) {
7188 i = select_idle_smt(p, prev);
7189 if ((unsigned int)i < nr_cpumask_bits)
7190 return i;
7191 }
7192 }
7193
7194 i = select_idle_cpu(p, sd, has_idle_core, target);
7195 if ((unsigned)i < nr_cpumask_bits)
7196 return i;
7197
7198 return target;
7199 }
7200
7201 /*
7202 * Predicts what cpu_util(@cpu) would return if @p was removed from @cpu
7203 * (@dst_cpu = -1) or migrated to @dst_cpu.
7204 */
7205 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
7206 {
7207 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
7208 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
7209
7210 /*
7211 * If @dst_cpu is -1 or @p migrates from @cpu to @dst_cpu remove its
7212 * contribution. If @p migrates from another CPU to @cpu add its
7213 * contribution. In all the other cases @cpu is not impacted by the
7214 * migration so its util_avg is already correct.
7215 */
7216 if (task_cpu(p) == cpu && dst_cpu != cpu)
7217 lsub_positive(&util, task_util(p));
7218 else if (task_cpu(p) != cpu && dst_cpu == cpu)
7219 util += task_util(p);
7220
7221 if (sched_feat(UTIL_EST)) {
7222 unsigned long util_est;
7223
7224 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
7225
7226 /*
7227 * During wake-up @p isn't enqueued yet and doesn't contribute
7228 * to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
7229 * If @dst_cpu == @cpu add it to "simulate" cpu_util after @p
7230 * has been enqueued.
7231 *
7232 * During exec (@dst_cpu = -1) @p is enqueued and does
7233 * contribute to cpu_rq(cpu)->cfs.util_est.enqueued.
7234 * Remove it to "simulate" cpu_util without @p's contribution.
7235 *
7236 * Despite the task_on_rq_queued(@p) check there is still a
7237 * small window for a possible race when an exec
7238 * select_task_rq_fair() races with LB's detach_task().
7239 *
7240 * detach_task()
7241 * deactivate_task()
7242 * p->on_rq = TASK_ON_RQ_MIGRATING;
7243 * -------------------------------- A
7244 * dequeue_task() \
7245 * dequeue_task_fair() + Race Time
7246 * util_est_dequeue() /
7247 * -------------------------------- B
7248 *
7249 * The additional check "current == p" is required to further
7250 * reduce the race window.
7251 */
7252 if (dst_cpu == cpu)
7253 util_est += _task_util_est(p);
7254 else if (unlikely(task_on_rq_queued(p) || current == p))
7255 lsub_positive(&util_est, _task_util_est(p));
7256
7257 util = max(util, util_est);
7258 }
7259
7260 return min(util, capacity_orig_of(cpu));
7261 }
7262
7263 /*
7264 * cpu_util_without: compute cpu utilization without any contributions from *p
7265 * @cpu: the CPU which utilization is requested
7266 * @p: the task which utilization should be discounted
7267 *
7268 * The utilization of a CPU is defined by the utilization of tasks currently
7269 * enqueued on that CPU as well as tasks which are currently sleeping after an
7270 * execution on that CPU.
7271 *
7272 * This method returns the utilization of the specified CPU by discounting the
7273 * utilization of the specified task, whenever the task is currently
7274 * contributing to the CPU utilization.
7275 */
7276 static unsigned long cpu_util_without(int cpu, struct task_struct *p)
7277 {
7278 /* Task has no contribution or is new */
7279 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
7280 return cpu_util_cfs(cpu);
7281
7282 return cpu_util_next(cpu, p, -1);
7283 }
7284
7285 /*
7286 * energy_env - Utilization landscape for energy estimation.
7287 * @task_busy_time: Utilization contribution by the task for which we test the
7288 * placement. Given by eenv_task_busy_time().
7289 * @pd_busy_time: Utilization of the whole perf domain without the task
7290 * contribution. Given by eenv_pd_busy_time().
7291 * @cpu_cap: Maximum CPU capacity for the perf domain.
7292 * @pd_cap: Entire perf domain capacity. (pd->nr_cpus * cpu_cap).
7293 */
7294 struct energy_env {
7295 unsigned long task_busy_time;
7296 unsigned long pd_busy_time;
7297 unsigned long cpu_cap;
7298 unsigned long pd_cap;
7299 };
7300
7301 /*
7302 * Compute the task busy time for compute_energy(). This time cannot be
7303 * injected directly into effective_cpu_util() because of the IRQ scaling.
7304 * The latter only makes sense with the most recent CPUs where the task has
7305 * run.
7306 */
7307 static inline void eenv_task_busy_time(struct energy_env *eenv,
7308 struct task_struct *p, int prev_cpu)
7309 {
7310 unsigned long busy_time, max_cap = arch_scale_cpu_capacity(prev_cpu);
7311 unsigned long irq = cpu_util_irq(cpu_rq(prev_cpu));
7312
7313 if (unlikely(irq >= max_cap))
7314 busy_time = max_cap;
7315 else
7316 busy_time = scale_irq_capacity(task_util_est(p), irq, max_cap);
7317
7318 eenv->task_busy_time = busy_time;
7319 }
7320
7321 /*
7322 * Compute the perf_domain (PD) busy time for compute_energy(). Based on the
7323 * utilization for each @pd_cpus, it however doesn't take into account
7324 * clamping since the ratio (utilization / cpu_capacity) is already enough to
7325 * scale the EM reported power consumption at the (eventually clamped)
7326 * cpu_capacity.
7327 *
7328 * The contribution of the task @p for which we want to estimate the
7329 * energy cost is removed (by cpu_util_next()) and must be calculated
7330 * separately (see eenv_task_busy_time). This ensures:
7331 *
7332 * - A stable PD utilization, no matter which CPU of that PD we want to place
7333 * the task on.
7334 *
7335 * - A fair comparison between CPUs as the task contribution (task_util())
7336 * will always be the same no matter which CPU utilization we rely on
7337 * (util_avg or util_est).
7338 *
7339 * Set @eenv busy time for the PD that spans @pd_cpus. This busy time can't
7340 * exceed @eenv->pd_cap.
7341 */
7342 static inline void eenv_pd_busy_time(struct energy_env *eenv,
7343 struct cpumask *pd_cpus,
7344 struct task_struct *p)
7345 {
7346 unsigned long busy_time = 0;
7347 int cpu;
7348
7349 for_each_cpu(cpu, pd_cpus) {
7350 unsigned long util = cpu_util_next(cpu, p, -1);
7351
7352 busy_time += effective_cpu_util(cpu, util, ENERGY_UTIL, NULL);
7353 }
7354
7355 eenv->pd_busy_time = min(eenv->pd_cap, busy_time);
7356 }
7357
7358 /*
7359 * Compute the maximum utilization for compute_energy() when the task @p
7360 * is placed on the cpu @dst_cpu.
7361 *
7362 * Returns the maximum utilization among @eenv->cpus. This utilization can't
7363 * exceed @eenv->cpu_cap.
7364 */
7365 static inline unsigned long
7366 eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus,
7367 struct task_struct *p, int dst_cpu)
7368 {
7369 unsigned long max_util = 0;
7370 int cpu;
7371
7372 for_each_cpu(cpu, pd_cpus) {
7373 struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL;
7374 unsigned long util = cpu_util_next(cpu, p, dst_cpu);
7375 unsigned long cpu_util;
7376
7377 /*
7378 * Performance domain frequency: utilization clamping
7379 * must be considered since it affects the selection
7380 * of the performance domain frequency.
7381 * NOTE: in case RT tasks are running, by default the
7382 * FREQUENCY_UTIL's utilization can be max OPP.
7383 */
7384 cpu_util = effective_cpu_util(cpu, util, FREQUENCY_UTIL, tsk);
7385 max_util = max(max_util, cpu_util);
7386 }
7387
7388 return min(max_util, eenv->cpu_cap);
7389 }
7390
7391 /*
7392 * compute_energy(): Use the Energy Model to estimate the energy that @pd would
7393 * consume for a given utilization landscape @eenv. When @dst_cpu < 0, the task
7394 * contribution is ignored.
7395 */
7396 static inline unsigned long
7397 compute_energy(struct energy_env *eenv, struct perf_domain *pd,
7398 struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu)
7399 {
7400 unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu);
7401 unsigned long busy_time = eenv->pd_busy_time;
7402
7403 if (dst_cpu >= 0)
7404 busy_time = min(eenv->pd_cap, busy_time + eenv->task_busy_time);
7405
7406 return em_cpu_energy(pd->em_pd, max_util, busy_time, eenv->cpu_cap);
7407 }
7408
7409 /*
7410 * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
7411 * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
7412 * spare capacity in each performance domain and uses it as a potential
7413 * candidate to execute the task. Then, it uses the Energy Model to figure
7414 * out which of the CPU candidates is the most energy-efficient.
7415 *
7416 * The rationale for this heuristic is as follows. In a performance domain,
7417 * all the most energy efficient CPU candidates (according to the Energy
7418 * Model) are those for which we'll request a low frequency. When there are
7419 * several CPUs for which the frequency request will be the same, we don't
7420 * have enough data to break the tie between them, because the Energy Model
7421 * only includes active power costs. With this model, if we assume that
7422 * frequency requests follow utilization (e.g. using schedutil), the CPU with
7423 * the maximum spare capacity in a performance domain is guaranteed to be among
7424 * the best candidates of the performance domain.
7425 *
7426 * In practice, it could be preferable from an energy standpoint to pack
7427 * small tasks on a CPU in order to let other CPUs go in deeper idle states,
7428 * but that could also hurt our chances to go cluster idle, and we have no
7429 * ways to tell with the current Energy Model if this is actually a good
7430 * idea or not. So, find_energy_efficient_cpu() basically favors
7431 * cluster-packing, and spreading inside a cluster. That should at least be
7432 * a good thing for latency, and this is consistent with the idea that most
7433 * of the energy savings of EAS come from the asymmetry of the system, and
7434 * not so much from breaking the tie between identical CPUs. That's also the
7435 * reason why EAS is enabled in the topology code only for systems where
7436 * SD_ASYM_CPUCAPACITY is set.
7437 *
7438 * NOTE: Forkees are not accepted in the energy-aware wake-up path because
7439 * they don't have any useful utilization data yet and it's not possible to
7440 * forecast their impact on energy consumption. Consequently, they will be
7441 * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
7442 * to be energy-inefficient in some use-cases. The alternative would be to
7443 * bias new tasks towards specific types of CPUs first, or to try to infer
7444 * their util_avg from the parent task, but those heuristics could hurt
7445 * other use-cases too. So, until someone finds a better way to solve this,
7446 * let's keep things simple by re-using the existing slow path.
7447 */
7448 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
7449 {
7450 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
7451 unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
7452 unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
7453 unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
7454 struct root_domain *rd = this_rq()->rd;
7455 int cpu, best_energy_cpu, target = -1;
7456 int prev_fits = -1, best_fits = -1;
7457 unsigned long best_thermal_cap = 0;
7458 unsigned long prev_thermal_cap = 0;
7459 struct sched_domain *sd;
7460 struct perf_domain *pd;
7461 struct energy_env eenv;
7462
7463 rcu_read_lock();
7464 pd = rcu_dereference(rd->pd);
7465 if (!pd || READ_ONCE(rd->overutilized))
7466 goto unlock;
7467
7468 /*
7469 * Energy-aware wake-up happens on the lowest sched_domain starting
7470 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
7471 */
7472 sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
7473 while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
7474 sd = sd->parent;
7475 if (!sd)
7476 goto unlock;
7477
7478 target = prev_cpu;
7479
7480 sync_entity_load_avg(&p->se);
7481 if (!uclamp_task_util(p, p_util_min, p_util_max))
7482 goto unlock;
7483
7484 eenv_task_busy_time(&eenv, p, prev_cpu);
7485
7486 for (; pd; pd = pd->next) {
7487 unsigned long util_min = p_util_min, util_max = p_util_max;
7488 unsigned long cpu_cap, cpu_thermal_cap, util;
7489 unsigned long cur_delta, max_spare_cap = 0;
7490 unsigned long rq_util_min, rq_util_max;
7491 unsigned long prev_spare_cap = 0;
7492 int max_spare_cap_cpu = -1;
7493 unsigned long base_energy;
7494 int fits, max_fits = -1;
7495
7496 cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
7497
7498 if (cpumask_empty(cpus))
7499 continue;
7500
7501 /* Account thermal pressure for the energy estimation */
7502 cpu = cpumask_first(cpus);
7503 cpu_thermal_cap = arch_scale_cpu_capacity(cpu);
7504 cpu_thermal_cap -= arch_scale_thermal_pressure(cpu);
7505
7506 eenv.cpu_cap = cpu_thermal_cap;
7507 eenv.pd_cap = 0;
7508
7509 for_each_cpu(cpu, cpus) {
7510 struct rq *rq = cpu_rq(cpu);
7511
7512 eenv.pd_cap += cpu_thermal_cap;
7513
7514 if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
7515 continue;
7516
7517 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
7518 continue;
7519
7520 util = cpu_util_next(cpu, p, cpu);
7521 cpu_cap = capacity_of(cpu);
7522
7523 /*
7524 * Skip CPUs that cannot satisfy the capacity request.
7525 * IOW, placing the task there would make the CPU
7526 * overutilized. Take uclamp into account to see how
7527 * much capacity we can get out of the CPU; this is
7528 * aligned with sched_cpu_util().
7529 */
7530 if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
7531 /*
7532 * Open code uclamp_rq_util_with() except for
7533 * the clamp() part. Ie: apply max aggregation
7534 * only. util_fits_cpu() logic requires to
7535 * operate on non clamped util but must use the
7536 * max-aggregated uclamp_{min, max}.
7537 */
7538 rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
7539 rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
7540
7541 util_min = max(rq_util_min, p_util_min);
7542 util_max = max(rq_util_max, p_util_max);
7543 }
7544
7545 fits = util_fits_cpu(util, util_min, util_max, cpu);
7546 if (!fits)
7547 continue;
7548
7549 lsub_positive(&cpu_cap, util);
7550
7551 if (cpu == prev_cpu) {
7552 /* Always use prev_cpu as a candidate. */
7553 prev_spare_cap = cpu_cap;
7554 prev_fits = fits;
7555 } else if ((fits > max_fits) ||
7556 ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
7557 /*
7558 * Find the CPU with the maximum spare capacity
7559 * among the remaining CPUs in the performance
7560 * domain.
7561 */
7562 max_spare_cap = cpu_cap;
7563 max_spare_cap_cpu = cpu;
7564 max_fits = fits;
7565 }
7566 }
7567
7568 if (max_spare_cap_cpu < 0 && prev_spare_cap == 0)
7569 continue;
7570
7571 eenv_pd_busy_time(&eenv, cpus, p);
7572 /* Compute the 'base' energy of the pd, without @p */
7573 base_energy = compute_energy(&eenv, pd, cpus, p, -1);
7574
7575 /* Evaluate the energy impact of using prev_cpu. */
7576 if (prev_spare_cap > 0) {
7577 prev_delta = compute_energy(&eenv, pd, cpus, p,
7578 prev_cpu);
7579 /* CPU utilization has changed */
7580 if (prev_delta < base_energy)
7581 goto unlock;
7582 prev_delta -= base_energy;
7583 prev_thermal_cap = cpu_thermal_cap;
7584 best_delta = min(best_delta, prev_delta);
7585 }
7586
7587 /* Evaluate the energy impact of using max_spare_cap_cpu. */
7588 if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
7589 /* Current best energy cpu fits better */
7590 if (max_fits < best_fits)
7591 continue;
7592
7593 /*
7594 * Both don't fit performance hint (i.e. uclamp_min)
7595 * but best energy cpu has better capacity.
7596 */
7597 if ((max_fits < 0) &&
7598 (cpu_thermal_cap <= best_thermal_cap))
7599 continue;
7600
7601 cur_delta = compute_energy(&eenv, pd, cpus, p,
7602 max_spare_cap_cpu);
7603 /* CPU utilization has changed */
7604 if (cur_delta < base_energy)
7605 goto unlock;
7606 cur_delta -= base_energy;
7607
7608 /*
7609 * Both fit for the task but best energy cpu has lower
7610 * energy impact.
7611 */
7612 if ((max_fits > 0) && (best_fits > 0) &&
7613 (cur_delta >= best_delta))
7614 continue;
7615
7616 best_delta = cur_delta;
7617 best_energy_cpu = max_spare_cap_cpu;
7618 best_fits = max_fits;
7619 best_thermal_cap = cpu_thermal_cap;
7620 }
7621 }
7622 rcu_read_unlock();
7623
7624 if ((best_fits > prev_fits) ||
7625 ((best_fits > 0) && (best_delta < prev_delta)) ||
7626 ((best_fits < 0) && (best_thermal_cap > prev_thermal_cap)))
7627 target = best_energy_cpu;
7628
7629 return target;
7630
7631 unlock:
7632 rcu_read_unlock();
7633
7634 return target;
7635 }
7636
7637 /*
7638 * select_task_rq_fair: Select target runqueue for the waking task in domains
7639 * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
7640 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
7641 *
7642 * Balances load by selecting the idlest CPU in the idlest group, or under
7643 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
7644 *
7645 * Returns the target CPU number.
7646 */
7647 static int
7648 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
7649 {
7650 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
7651 struct sched_domain *tmp, *sd = NULL;
7652 int cpu = smp_processor_id();
7653 int new_cpu = prev_cpu;
7654 int want_affine = 0;
7655 /* SD_flags and WF_flags share the first nibble */
7656 int sd_flag = wake_flags & 0xF;
7657
7658 /*
7659 * required for stable ->cpus_allowed
7660 */
7661 lockdep_assert_held(&p->pi_lock);
7662 if (wake_flags & WF_TTWU) {
7663 record_wakee(p);
7664
7665 if (sched_energy_enabled()) {
7666 new_cpu = find_energy_efficient_cpu(p, prev_cpu);
7667 if (new_cpu >= 0)
7668 return new_cpu;
7669 new_cpu = prev_cpu;
7670 }
7671
7672 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
7673 }
7674
7675 rcu_read_lock();
7676 for_each_domain(cpu, tmp) {
7677 /*
7678 * If both 'cpu' and 'prev_cpu' are part of this domain,
7679 * cpu is a valid SD_WAKE_AFFINE target.
7680 */
7681 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
7682 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
7683 if (cpu != prev_cpu)
7684 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
7685
7686 sd = NULL; /* Prefer wake_affine over balance flags */
7687 break;
7688 }
7689
7690 /*
7691 * Usually only true for WF_EXEC and WF_FORK, as sched_domains
7692 * usually do not have SD_BALANCE_WAKE set. That means wakeup
7693 * will usually go to the fast path.
7694 */
7695 if (tmp->flags & sd_flag)
7696 sd = tmp;
7697 else if (!want_affine)
7698 break;
7699 }
7700
7701 if (unlikely(sd)) {
7702 /* Slow path */
7703 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
7704 } else if (wake_flags & WF_TTWU) { /* XXX always ? */
7705 /* Fast path */
7706 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
7707 }
7708 rcu_read_unlock();
7709
7710 return new_cpu;
7711 }
7712
7713 /*
7714 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
7715 * cfs_rq_of(p) references at time of call are still valid and identify the
7716 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
7717 */
7718 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
7719 {
7720 struct sched_entity *se = &p->se;
7721
7722 /*
7723 * As blocked tasks retain absolute vruntime the migration needs to
7724 * deal with this by subtracting the old and adding the new
7725 * min_vruntime -- the latter is done by enqueue_entity() when placing
7726 * the task on the new runqueue.
7727 */
7728 if (READ_ONCE(p->__state) == TASK_WAKING) {
7729 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7730
7731 se->vruntime -= u64_u32_load(cfs_rq->min_vruntime);
7732 }
7733
7734 if (!task_on_rq_migrating(p)) {
7735 remove_entity_load_avg(se);
7736
7737 /*
7738 * Here, the task's PELT values have been updated according to
7739 * the current rq's clock. But if that clock hasn't been
7740 * updated in a while, a substantial idle time will be missed,
7741 * leading to an inflation after wake-up on the new rq.
7742 *
7743 * Estimate the missing time from the cfs_rq last_update_time
7744 * and update sched_avg to improve the PELT continuity after
7745 * migration.
7746 */
7747 migrate_se_pelt_lag(se);
7748 }
7749
7750 /* Tell new CPU we are migrated */
7751 se->avg.last_update_time = 0;
7752
7753 update_scan_period(p, new_cpu);
7754 }
7755
7756 static void task_dead_fair(struct task_struct *p)
7757 {
7758 remove_entity_load_avg(&p->se);
7759 }
7760
7761 static int
7762 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7763 {
7764 if (rq->nr_running)
7765 return 1;
7766
7767 return newidle_balance(rq, rf) != 0;
7768 }
7769 #endif /* CONFIG_SMP */
7770
7771 static unsigned long wakeup_gran(struct sched_entity *se)
7772 {
7773 unsigned long gran = sysctl_sched_wakeup_granularity;
7774
7775 /*
7776 * Since its curr running now, convert the gran from real-time
7777 * to virtual-time in his units.
7778 *
7779 * By using 'se' instead of 'curr' we penalize light tasks, so
7780 * they get preempted easier. That is, if 'se' < 'curr' then
7781 * the resulting gran will be larger, therefore penalizing the
7782 * lighter, if otoh 'se' > 'curr' then the resulting gran will
7783 * be smaller, again penalizing the lighter task.
7784 *
7785 * This is especially important for buddies when the leftmost
7786 * task is higher priority than the buddy.
7787 */
7788 return calc_delta_fair(gran, se);
7789 }
7790
7791 /*
7792 * Should 'se' preempt 'curr'.
7793 *
7794 * |s1
7795 * |s2
7796 * |s3
7797 * g
7798 * |<--->|c
7799 *
7800 * w(c, s1) = -1
7801 * w(c, s2) = 0
7802 * w(c, s3) = 1
7803 *
7804 */
7805 static int
7806 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
7807 {
7808 s64 gran, vdiff = curr->vruntime - se->vruntime;
7809
7810 if (vdiff <= 0)
7811 return -1;
7812
7813 gran = wakeup_gran(se);
7814 if (vdiff > gran)
7815 return 1;
7816
7817 return 0;
7818 }
7819
7820 static void set_last_buddy(struct sched_entity *se)
7821 {
7822 for_each_sched_entity(se) {
7823 if (SCHED_WARN_ON(!se->on_rq))
7824 return;
7825 if (se_is_idle(se))
7826 return;
7827 cfs_rq_of(se)->last = se;
7828 }
7829 }
7830
7831 static void set_next_buddy(struct sched_entity *se)
7832 {
7833 for_each_sched_entity(se) {
7834 if (SCHED_WARN_ON(!se->on_rq))
7835 return;
7836 if (se_is_idle(se))
7837 return;
7838 cfs_rq_of(se)->next = se;
7839 }
7840 }
7841
7842 static void set_skip_buddy(struct sched_entity *se)
7843 {
7844 for_each_sched_entity(se)
7845 cfs_rq_of(se)->skip = se;
7846 }
7847
7848 /*
7849 * Preempt the current task with a newly woken task if needed:
7850 */
7851 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
7852 {
7853 struct task_struct *curr = rq->curr;
7854 struct sched_entity *se = &curr->se, *pse = &p->se;
7855 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
7856 int scale = cfs_rq->nr_running >= sched_nr_latency;
7857 int next_buddy_marked = 0;
7858 int cse_is_idle, pse_is_idle;
7859
7860 if (unlikely(se == pse))
7861 return;
7862
7863 /*
7864 * This is possible from callers such as attach_tasks(), in which we
7865 * unconditionally check_preempt_curr() after an enqueue (which may have
7866 * lead to a throttle). This both saves work and prevents false
7867 * next-buddy nomination below.
7868 */
7869 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
7870 return;
7871
7872 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
7873 set_next_buddy(pse);
7874 next_buddy_marked = 1;
7875 }
7876
7877 /*
7878 * We can come here with TIF_NEED_RESCHED already set from new task
7879 * wake up path.
7880 *
7881 * Note: this also catches the edge-case of curr being in a throttled
7882 * group (e.g. via set_curr_task), since update_curr() (in the
7883 * enqueue of curr) will have resulted in resched being set. This
7884 * prevents us from potentially nominating it as a false LAST_BUDDY
7885 * below.
7886 */
7887 if (test_tsk_need_resched(curr))
7888 return;
7889
7890 /* Idle tasks are by definition preempted by non-idle tasks. */
7891 if (unlikely(task_has_idle_policy(curr)) &&
7892 likely(!task_has_idle_policy(p)))
7893 goto preempt;
7894
7895 /*
7896 * Batch and idle tasks do not preempt non-idle tasks (their preemption
7897 * is driven by the tick):
7898 */
7899 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
7900 return;
7901
7902 find_matching_se(&se, &pse);
7903 WARN_ON_ONCE(!pse);
7904
7905 cse_is_idle = se_is_idle(se);
7906 pse_is_idle = se_is_idle(pse);
7907
7908 /*
7909 * Preempt an idle group in favor of a non-idle group (and don't preempt
7910 * in the inverse case).
7911 */
7912 if (cse_is_idle && !pse_is_idle)
7913 goto preempt;
7914 if (cse_is_idle != pse_is_idle)
7915 return;
7916
7917 update_curr(cfs_rq_of(se));
7918 if (wakeup_preempt_entity(se, pse) == 1) {
7919 /*
7920 * Bias pick_next to pick the sched entity that is
7921 * triggering this preemption.
7922 */
7923 if (!next_buddy_marked)
7924 set_next_buddy(pse);
7925 goto preempt;
7926 }
7927
7928 return;
7929
7930 preempt:
7931 resched_curr(rq);
7932 /*
7933 * Only set the backward buddy when the current task is still
7934 * on the rq. This can happen when a wakeup gets interleaved
7935 * with schedule on the ->pre_schedule() or idle_balance()
7936 * point, either of which can * drop the rq lock.
7937 *
7938 * Also, during early boot the idle thread is in the fair class,
7939 * for obvious reasons its a bad idea to schedule back to it.
7940 */
7941 if (unlikely(!se->on_rq || curr == rq->idle))
7942 return;
7943
7944 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
7945 set_last_buddy(se);
7946 }
7947
7948 #ifdef CONFIG_SMP
7949 static struct task_struct *pick_task_fair(struct rq *rq)
7950 {
7951 struct sched_entity *se;
7952 struct cfs_rq *cfs_rq;
7953
7954 again:
7955 cfs_rq = &rq->cfs;
7956 if (!cfs_rq->nr_running)
7957 return NULL;
7958
7959 do {
7960 struct sched_entity *curr = cfs_rq->curr;
7961
7962 /* When we pick for a remote RQ, we'll not have done put_prev_entity() */
7963 if (curr) {
7964 if (curr->on_rq)
7965 update_curr(cfs_rq);
7966 else
7967 curr = NULL;
7968
7969 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
7970 goto again;
7971 }
7972
7973 se = pick_next_entity(cfs_rq, curr);
7974 cfs_rq = group_cfs_rq(se);
7975 } while (cfs_rq);
7976
7977 return task_of(se);
7978 }
7979 #endif
7980
7981 struct task_struct *
7982 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
7983 {
7984 struct cfs_rq *cfs_rq = &rq->cfs;
7985 struct sched_entity *se;
7986 struct task_struct *p;
7987 int new_tasks;
7988
7989 again:
7990 if (!sched_fair_runnable(rq))
7991 goto idle;
7992
7993 #ifdef CONFIG_FAIR_GROUP_SCHED
7994 if (!prev || prev->sched_class != &fair_sched_class)
7995 goto simple;
7996
7997 /*
7998 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
7999 * likely that a next task is from the same cgroup as the current.
8000 *
8001 * Therefore attempt to avoid putting and setting the entire cgroup
8002 * hierarchy, only change the part that actually changes.
8003 */
8004
8005 do {
8006 struct sched_entity *curr = cfs_rq->curr;
8007
8008 /*
8009 * Since we got here without doing put_prev_entity() we also
8010 * have to consider cfs_rq->curr. If it is still a runnable
8011 * entity, update_curr() will update its vruntime, otherwise
8012 * forget we've ever seen it.
8013 */
8014 if (curr) {
8015 if (curr->on_rq)
8016 update_curr(cfs_rq);
8017 else
8018 curr = NULL;
8019
8020 /*
8021 * This call to check_cfs_rq_runtime() will do the
8022 * throttle and dequeue its entity in the parent(s).
8023 * Therefore the nr_running test will indeed
8024 * be correct.
8025 */
8026 if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
8027 cfs_rq = &rq->cfs;
8028
8029 if (!cfs_rq->nr_running)
8030 goto idle;
8031
8032 goto simple;
8033 }
8034 }
8035
8036 se = pick_next_entity(cfs_rq, curr);
8037 cfs_rq = group_cfs_rq(se);
8038 } while (cfs_rq);
8039
8040 p = task_of(se);
8041
8042 /*
8043 * Since we haven't yet done put_prev_entity and if the selected task
8044 * is a different task than we started out with, try and touch the
8045 * least amount of cfs_rqs.
8046 */
8047 if (prev != p) {
8048 struct sched_entity *pse = &prev->se;
8049
8050 while (!(cfs_rq = is_same_group(se, pse))) {
8051 int se_depth = se->depth;
8052 int pse_depth = pse->depth;
8053
8054 if (se_depth <= pse_depth) {
8055 put_prev_entity(cfs_rq_of(pse), pse);
8056 pse = parent_entity(pse);
8057 }
8058 if (se_depth >= pse_depth) {
8059 set_next_entity(cfs_rq_of(se), se);
8060 se = parent_entity(se);
8061 }
8062 }
8063
8064 put_prev_entity(cfs_rq, pse);
8065 set_next_entity(cfs_rq, se);
8066 }
8067
8068 goto done;
8069 simple:
8070 #endif
8071 if (prev)
8072 put_prev_task(rq, prev);
8073
8074 do {
8075 se = pick_next_entity(cfs_rq, NULL);
8076 set_next_entity(cfs_rq, se);
8077 cfs_rq = group_cfs_rq(se);
8078 } while (cfs_rq);
8079
8080 p = task_of(se);
8081
8082 done: __maybe_unused;
8083 #ifdef CONFIG_SMP
8084 /*
8085 * Move the next running task to the front of
8086 * the list, so our cfs_tasks list becomes MRU
8087 * one.
8088 */
8089 list_move(&p->se.group_node, &rq->cfs_tasks);
8090 #endif
8091
8092 if (hrtick_enabled_fair(rq))
8093 hrtick_start_fair(rq, p);
8094
8095 update_misfit_status(p, rq);
8096
8097 return p;
8098
8099 idle:
8100 if (!rf)
8101 return NULL;
8102
8103 new_tasks = newidle_balance(rq, rf);
8104
8105 /*
8106 * Because newidle_balance() releases (and re-acquires) rq->lock, it is
8107 * possible for any higher priority task to appear. In that case we
8108 * must re-start the pick_next_entity() loop.
8109 */
8110 if (new_tasks < 0)
8111 return RETRY_TASK;
8112
8113 if (new_tasks > 0)
8114 goto again;
8115
8116 /*
8117 * rq is about to be idle, check if we need to update the
8118 * lost_idle_time of clock_pelt
8119 */
8120 update_idle_rq_clock_pelt(rq);
8121
8122 return NULL;
8123 }
8124
8125 static struct task_struct *__pick_next_task_fair(struct rq *rq)
8126 {
8127 return pick_next_task_fair(rq, NULL, NULL);
8128 }
8129
8130 /*
8131 * Account for a descheduled task:
8132 */
8133 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
8134 {
8135 struct sched_entity *se = &prev->se;
8136 struct cfs_rq *cfs_rq;
8137
8138 for_each_sched_entity(se) {
8139 cfs_rq = cfs_rq_of(se);
8140 put_prev_entity(cfs_rq, se);
8141 }
8142 }
8143
8144 /*
8145 * sched_yield() is very simple
8146 *
8147 * The magic of dealing with the ->skip buddy is in pick_next_entity.
8148 */
8149 static void yield_task_fair(struct rq *rq)
8150 {
8151 struct task_struct *curr = rq->curr;
8152 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
8153 struct sched_entity *se = &curr->se;
8154
8155 /*
8156 * Are we the only task in the tree?
8157 */
8158 if (unlikely(rq->nr_running == 1))
8159 return;
8160
8161 clear_buddies(cfs_rq, se);
8162
8163 if (curr->policy != SCHED_BATCH) {
8164 update_rq_clock(rq);
8165 /*
8166 * Update run-time statistics of the 'current'.
8167 */
8168 update_curr(cfs_rq);
8169 /*
8170 * Tell update_rq_clock() that we've just updated,
8171 * so we don't do microscopic update in schedule()
8172 * and double the fastpath cost.
8173 */
8174 rq_clock_skip_update(rq);
8175 }
8176
8177 set_skip_buddy(se);
8178 }
8179
8180 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
8181 {
8182 struct sched_entity *se = &p->se;
8183
8184 /* throttled hierarchies are not runnable */
8185 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
8186 return false;
8187
8188 /* Tell the scheduler that we'd really like pse to run next. */
8189 set_next_buddy(se);
8190
8191 yield_task_fair(rq);
8192
8193 return true;
8194 }
8195
8196 #ifdef CONFIG_SMP
8197 /**************************************************
8198 * Fair scheduling class load-balancing methods.
8199 *
8200 * BASICS
8201 *
8202 * The purpose of load-balancing is to achieve the same basic fairness the
8203 * per-CPU scheduler provides, namely provide a proportional amount of compute
8204 * time to each task. This is expressed in the following equation:
8205 *
8206 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
8207 *
8208 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
8209 * W_i,0 is defined as:
8210 *
8211 * W_i,0 = \Sum_j w_i,j (2)
8212 *
8213 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
8214 * is derived from the nice value as per sched_prio_to_weight[].
8215 *
8216 * The weight average is an exponential decay average of the instantaneous
8217 * weight:
8218 *
8219 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
8220 *
8221 * C_i is the compute capacity of CPU i, typically it is the
8222 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
8223 * can also include other factors [XXX].
8224 *
8225 * To achieve this balance we define a measure of imbalance which follows
8226 * directly from (1):
8227 *
8228 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
8229 *
8230 * We them move tasks around to minimize the imbalance. In the continuous
8231 * function space it is obvious this converges, in the discrete case we get
8232 * a few fun cases generally called infeasible weight scenarios.
8233 *
8234 * [XXX expand on:
8235 * - infeasible weights;
8236 * - local vs global optima in the discrete case. ]
8237 *
8238 *
8239 * SCHED DOMAINS
8240 *
8241 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
8242 * for all i,j solution, we create a tree of CPUs that follows the hardware
8243 * topology where each level pairs two lower groups (or better). This results
8244 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
8245 * tree to only the first of the previous level and we decrease the frequency
8246 * of load-balance at each level inv. proportional to the number of CPUs in
8247 * the groups.
8248 *
8249 * This yields:
8250 *
8251 * log_2 n 1 n
8252 * \Sum { --- * --- * 2^i } = O(n) (5)
8253 * i = 0 2^i 2^i
8254 * `- size of each group
8255 * | | `- number of CPUs doing load-balance
8256 * | `- freq
8257 * `- sum over all levels
8258 *
8259 * Coupled with a limit on how many tasks we can migrate every balance pass,
8260 * this makes (5) the runtime complexity of the balancer.
8261 *
8262 * An important property here is that each CPU is still (indirectly) connected
8263 * to every other CPU in at most O(log n) steps:
8264 *
8265 * The adjacency matrix of the resulting graph is given by:
8266 *
8267 * log_2 n
8268 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
8269 * k = 0
8270 *
8271 * And you'll find that:
8272 *
8273 * A^(log_2 n)_i,j != 0 for all i,j (7)
8274 *
8275 * Showing there's indeed a path between every CPU in at most O(log n) steps.
8276 * The task movement gives a factor of O(m), giving a convergence complexity
8277 * of:
8278 *
8279 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
8280 *
8281 *
8282 * WORK CONSERVING
8283 *
8284 * In order to avoid CPUs going idle while there's still work to do, new idle
8285 * balancing is more aggressive and has the newly idle CPU iterate up the domain
8286 * tree itself instead of relying on other CPUs to bring it work.
8287 *
8288 * This adds some complexity to both (5) and (8) but it reduces the total idle
8289 * time.
8290 *
8291 * [XXX more?]
8292 *
8293 *
8294 * CGROUPS
8295 *
8296 * Cgroups make a horror show out of (2), instead of a simple sum we get:
8297 *
8298 * s_k,i
8299 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
8300 * S_k
8301 *
8302 * Where
8303 *
8304 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
8305 *
8306 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
8307 *
8308 * The big problem is S_k, its a global sum needed to compute a local (W_i)
8309 * property.
8310 *
8311 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
8312 * rewrite all of this once again.]
8313 */
8314
8315 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
8316
8317 enum fbq_type { regular, remote, all };
8318
8319 /*
8320 * 'group_type' describes the group of CPUs at the moment of load balancing.
8321 *
8322 * The enum is ordered by pulling priority, with the group with lowest priority
8323 * first so the group_type can simply be compared when selecting the busiest
8324 * group. See update_sd_pick_busiest().
8325 */
8326 enum group_type {
8327 /* The group has spare capacity that can be used to run more tasks. */
8328 group_has_spare = 0,
8329 /*
8330 * The group is fully used and the tasks don't compete for more CPU
8331 * cycles. Nevertheless, some tasks might wait before running.
8332 */
8333 group_fully_busy,
8334 /*
8335 * One task doesn't fit with CPU's capacity and must be migrated to a
8336 * more powerful CPU.
8337 */
8338 group_misfit_task,
8339 /*
8340 * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
8341 * and the task should be migrated to it instead of running on the
8342 * current CPU.
8343 */
8344 group_asym_packing,
8345 /*
8346 * The tasks' affinity constraints previously prevented the scheduler
8347 * from balancing the load across the system.
8348 */
8349 group_imbalanced,
8350 /*
8351 * The CPU is overloaded and can't provide expected CPU cycles to all
8352 * tasks.
8353 */
8354 group_overloaded
8355 };
8356
8357 enum migration_type {
8358 migrate_load = 0,
8359 migrate_util,
8360 migrate_task,
8361 migrate_misfit
8362 };
8363
8364 #define LBF_ALL_PINNED 0x01
8365 #define LBF_NEED_BREAK 0x02
8366 #define LBF_DST_PINNED 0x04
8367 #define LBF_SOME_PINNED 0x08
8368 #define LBF_ACTIVE_LB 0x10
8369
8370 struct lb_env {
8371 struct sched_domain *sd;
8372
8373 struct rq *src_rq;
8374 int src_cpu;
8375
8376 int dst_cpu;
8377 struct rq *dst_rq;
8378
8379 struct cpumask *dst_grpmask;
8380 int new_dst_cpu;
8381 enum cpu_idle_type idle;
8382 long imbalance;
8383 /* The set of CPUs under consideration for load-balancing */
8384 struct cpumask *cpus;
8385
8386 unsigned int flags;
8387
8388 unsigned int loop;
8389 unsigned int loop_break;
8390 unsigned int loop_max;
8391
8392 enum fbq_type fbq_type;
8393 enum migration_type migration_type;
8394 struct list_head tasks;
8395 };
8396
8397 /*
8398 * Is this task likely cache-hot:
8399 */
8400 static int task_hot(struct task_struct *p, struct lb_env *env)
8401 {
8402 s64 delta;
8403
8404 lockdep_assert_rq_held(env->src_rq);
8405
8406 if (p->sched_class != &fair_sched_class)
8407 return 0;
8408
8409 if (unlikely(task_has_idle_policy(p)))
8410 return 0;
8411
8412 /* SMT siblings share cache */
8413 if (env->sd->flags & SD_SHARE_CPUCAPACITY)
8414 return 0;
8415
8416 /*
8417 * Buddy candidates are cache hot:
8418 */
8419 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
8420 (&p->se == cfs_rq_of(&p->se)->next ||
8421 &p->se == cfs_rq_of(&p->se)->last))
8422 return 1;
8423
8424 if (sysctl_sched_migration_cost == -1)
8425 return 1;
8426
8427 /*
8428 * Don't migrate task if the task's cookie does not match
8429 * with the destination CPU's core cookie.
8430 */
8431 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
8432 return 1;
8433
8434 if (sysctl_sched_migration_cost == 0)
8435 return 0;
8436
8437 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
8438
8439 return delta < (s64)sysctl_sched_migration_cost;
8440 }
8441
8442 #ifdef CONFIG_NUMA_BALANCING
8443 /*
8444 * Returns 1, if task migration degrades locality
8445 * Returns 0, if task migration improves locality i.e migration preferred.
8446 * Returns -1, if task migration is not affected by locality.
8447 */
8448 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
8449 {
8450 struct numa_group *numa_group = rcu_dereference(p->numa_group);
8451 unsigned long src_weight, dst_weight;
8452 int src_nid, dst_nid, dist;
8453
8454 if (!static_branch_likely(&sched_numa_balancing))
8455 return -1;
8456
8457 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
8458 return -1;
8459
8460 src_nid = cpu_to_node(env->src_cpu);
8461 dst_nid = cpu_to_node(env->dst_cpu);
8462
8463 if (src_nid == dst_nid)
8464 return -1;
8465
8466 /* Migrating away from the preferred node is always bad. */
8467 if (src_nid == p->numa_preferred_nid) {
8468 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
8469 return 1;
8470 else
8471 return -1;
8472 }
8473
8474 /* Encourage migration to the preferred node. */
8475 if (dst_nid == p->numa_preferred_nid)
8476 return 0;
8477
8478 /* Leaving a core idle is often worse than degrading locality. */
8479 if (env->idle == CPU_IDLE)
8480 return -1;
8481
8482 dist = node_distance(src_nid, dst_nid);
8483 if (numa_group) {
8484 src_weight = group_weight(p, src_nid, dist);
8485 dst_weight = group_weight(p, dst_nid, dist);
8486 } else {
8487 src_weight = task_weight(p, src_nid, dist);
8488 dst_weight = task_weight(p, dst_nid, dist);
8489 }
8490
8491 return dst_weight < src_weight;
8492 }
8493
8494 #else
8495 static inline int migrate_degrades_locality(struct task_struct *p,
8496 struct lb_env *env)
8497 {
8498 return -1;
8499 }
8500 #endif
8501
8502 /*
8503 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
8504 */
8505 static
8506 int can_migrate_task(struct task_struct *p, struct lb_env *env)
8507 {
8508 int tsk_cache_hot;
8509
8510 lockdep_assert_rq_held(env->src_rq);
8511
8512 /*
8513 * We do not migrate tasks that are:
8514 * 1) throttled_lb_pair, or
8515 * 2) cannot be migrated to this CPU due to cpus_ptr, or
8516 * 3) running (obviously), or
8517 * 4) are cache-hot on their current CPU.
8518 */
8519 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
8520 return 0;
8521
8522 /* Disregard pcpu kthreads; they are where they need to be. */
8523 if (kthread_is_per_cpu(p))
8524 return 0;
8525
8526 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
8527 int cpu;
8528
8529 schedstat_inc(p->stats.nr_failed_migrations_affine);
8530
8531 env->flags |= LBF_SOME_PINNED;
8532
8533 /*
8534 * Remember if this task can be migrated to any other CPU in
8535 * our sched_group. We may want to revisit it if we couldn't
8536 * meet load balance goals by pulling other tasks on src_cpu.
8537 *
8538 * Avoid computing new_dst_cpu
8539 * - for NEWLY_IDLE
8540 * - if we have already computed one in current iteration
8541 * - if it's an active balance
8542 */
8543 if (env->idle == CPU_NEWLY_IDLE ||
8544 env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
8545 return 0;
8546
8547 /* Prevent to re-select dst_cpu via env's CPUs: */
8548 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
8549 if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
8550 env->flags |= LBF_DST_PINNED;
8551 env->new_dst_cpu = cpu;
8552 break;
8553 }
8554 }
8555
8556 return 0;
8557 }
8558
8559 /* Record that we found at least one task that could run on dst_cpu */
8560 env->flags &= ~LBF_ALL_PINNED;
8561
8562 if (task_on_cpu(env->src_rq, p)) {
8563 schedstat_inc(p->stats.nr_failed_migrations_running);
8564 return 0;
8565 }
8566
8567 /*
8568 * Aggressive migration if:
8569 * 1) active balance
8570 * 2) destination numa is preferred
8571 * 3) task is cache cold, or
8572 * 4) too many balance attempts have failed.
8573 */
8574 if (env->flags & LBF_ACTIVE_LB)
8575 return 1;
8576
8577 tsk_cache_hot = migrate_degrades_locality(p, env);
8578 if (tsk_cache_hot == -1)
8579 tsk_cache_hot = task_hot(p, env);
8580
8581 if (tsk_cache_hot <= 0 ||
8582 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
8583 if (tsk_cache_hot == 1) {
8584 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
8585 schedstat_inc(p->stats.nr_forced_migrations);
8586 }
8587 return 1;
8588 }
8589
8590 schedstat_inc(p->stats.nr_failed_migrations_hot);
8591 return 0;
8592 }
8593
8594 /*
8595 * detach_task() -- detach the task for the migration specified in env
8596 */
8597 static void detach_task(struct task_struct *p, struct lb_env *env)
8598 {
8599 lockdep_assert_rq_held(env->src_rq);
8600
8601 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
8602 set_task_cpu(p, env->dst_cpu);
8603 }
8604
8605 /*
8606 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
8607 * part of active balancing operations within "domain".
8608 *
8609 * Returns a task if successful and NULL otherwise.
8610 */
8611 static struct task_struct *detach_one_task(struct lb_env *env)
8612 {
8613 struct task_struct *p;
8614
8615 lockdep_assert_rq_held(env->src_rq);
8616
8617 list_for_each_entry_reverse(p,
8618 &env->src_rq->cfs_tasks, se.group_node) {
8619 if (!can_migrate_task(p, env))
8620 continue;
8621
8622 detach_task(p, env);
8623
8624 /*
8625 * Right now, this is only the second place where
8626 * lb_gained[env->idle] is updated (other is detach_tasks)
8627 * so we can safely collect stats here rather than
8628 * inside detach_tasks().
8629 */
8630 schedstat_inc(env->sd->lb_gained[env->idle]);
8631 return p;
8632 }
8633 return NULL;
8634 }
8635
8636 /*
8637 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
8638 * busiest_rq, as part of a balancing operation within domain "sd".
8639 *
8640 * Returns number of detached tasks if successful and 0 otherwise.
8641 */
8642 static int detach_tasks(struct lb_env *env)
8643 {
8644 struct list_head *tasks = &env->src_rq->cfs_tasks;
8645 unsigned long util, load;
8646 struct task_struct *p;
8647 int detached = 0;
8648
8649 lockdep_assert_rq_held(env->src_rq);
8650
8651 /*
8652 * Source run queue has been emptied by another CPU, clear
8653 * LBF_ALL_PINNED flag as we will not test any task.
8654 */
8655 if (env->src_rq->nr_running <= 1) {
8656 env->flags &= ~LBF_ALL_PINNED;
8657 return 0;
8658 }
8659
8660 if (env->imbalance <= 0)
8661 return 0;
8662
8663 while (!list_empty(tasks)) {
8664 /*
8665 * We don't want to steal all, otherwise we may be treated likewise,
8666 * which could at worst lead to a livelock crash.
8667 */
8668 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
8669 break;
8670
8671 env->loop++;
8672 /*
8673 * We've more or less seen every task there is, call it quits
8674 * unless we haven't found any movable task yet.
8675 */
8676 if (env->loop > env->loop_max &&
8677 !(env->flags & LBF_ALL_PINNED))
8678 break;
8679
8680 /* take a breather every nr_migrate tasks */
8681 if (env->loop > env->loop_break) {
8682 env->loop_break += SCHED_NR_MIGRATE_BREAK;
8683 env->flags |= LBF_NEED_BREAK;
8684 break;
8685 }
8686
8687 p = list_last_entry(tasks, struct task_struct, se.group_node);
8688
8689 if (!can_migrate_task(p, env))
8690 goto next;
8691
8692 switch (env->migration_type) {
8693 case migrate_load:
8694 /*
8695 * Depending of the number of CPUs and tasks and the
8696 * cgroup hierarchy, task_h_load() can return a null
8697 * value. Make sure that env->imbalance decreases
8698 * otherwise detach_tasks() will stop only after
8699 * detaching up to loop_max tasks.
8700 */
8701 load = max_t(unsigned long, task_h_load(p), 1);
8702
8703 if (sched_feat(LB_MIN) &&
8704 load < 16 && !env->sd->nr_balance_failed)
8705 goto next;
8706
8707 /*
8708 * Make sure that we don't migrate too much load.
8709 * Nevertheless, let relax the constraint if
8710 * scheduler fails to find a good waiting task to
8711 * migrate.
8712 */
8713 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
8714 goto next;
8715
8716 env->imbalance -= load;
8717 break;
8718
8719 case migrate_util:
8720 util = task_util_est(p);
8721
8722 if (util > env->imbalance)
8723 goto next;
8724
8725 env->imbalance -= util;
8726 break;
8727
8728 case migrate_task:
8729 env->imbalance--;
8730 break;
8731
8732 case migrate_misfit:
8733 /* This is not a misfit task */
8734 if (task_fits_cpu(p, env->src_cpu))
8735 goto next;
8736
8737 env->imbalance = 0;
8738 break;
8739 }
8740
8741 detach_task(p, env);
8742 list_add(&p->se.group_node, &env->tasks);
8743
8744 detached++;
8745
8746 #ifdef CONFIG_PREEMPTION
8747 /*
8748 * NEWIDLE balancing is a source of latency, so preemptible
8749 * kernels will stop after the first task is detached to minimize
8750 * the critical section.
8751 */
8752 if (env->idle == CPU_NEWLY_IDLE)
8753 break;
8754 #endif
8755
8756 /*
8757 * We only want to steal up to the prescribed amount of
8758 * load/util/tasks.
8759 */
8760 if (env->imbalance <= 0)
8761 break;
8762
8763 continue;
8764 next:
8765 list_move(&p->se.group_node, tasks);
8766 }
8767
8768 /*
8769 * Right now, this is one of only two places we collect this stat
8770 * so we can safely collect detach_one_task() stats here rather
8771 * than inside detach_one_task().
8772 */
8773 schedstat_add(env->sd->lb_gained[env->idle], detached);
8774
8775 return detached;
8776 }
8777
8778 /*
8779 * attach_task() -- attach the task detached by detach_task() to its new rq.
8780 */
8781 static void attach_task(struct rq *rq, struct task_struct *p)
8782 {
8783 lockdep_assert_rq_held(rq);
8784
8785 WARN_ON_ONCE(task_rq(p) != rq);
8786 activate_task(rq, p, ENQUEUE_NOCLOCK);
8787 check_preempt_curr(rq, p, 0);
8788 }
8789
8790 /*
8791 * attach_one_task() -- attaches the task returned from detach_one_task() to
8792 * its new rq.
8793 */
8794 static void attach_one_task(struct rq *rq, struct task_struct *p)
8795 {
8796 struct rq_flags rf;
8797
8798 rq_lock(rq, &rf);
8799 update_rq_clock(rq);
8800 attach_task(rq, p);
8801 rq_unlock(rq, &rf);
8802 }
8803
8804 /*
8805 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
8806 * new rq.
8807 */
8808 static void attach_tasks(struct lb_env *env)
8809 {
8810 struct list_head *tasks = &env->tasks;
8811 struct task_struct *p;
8812 struct rq_flags rf;
8813
8814 rq_lock(env->dst_rq, &rf);
8815 update_rq_clock(env->dst_rq);
8816
8817 while (!list_empty(tasks)) {
8818 p = list_first_entry(tasks, struct task_struct, se.group_node);
8819 list_del_init(&p->se.group_node);
8820
8821 attach_task(env->dst_rq, p);
8822 }
8823
8824 rq_unlock(env->dst_rq, &rf);
8825 }
8826
8827 #ifdef CONFIG_NO_HZ_COMMON
8828 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
8829 {
8830 if (cfs_rq->avg.load_avg)
8831 return true;
8832
8833 if (cfs_rq->avg.util_avg)
8834 return true;
8835
8836 return false;
8837 }
8838
8839 static inline bool others_have_blocked(struct rq *rq)
8840 {
8841 if (READ_ONCE(rq->avg_rt.util_avg))
8842 return true;
8843
8844 if (READ_ONCE(rq->avg_dl.util_avg))
8845 return true;
8846
8847 if (thermal_load_avg(rq))
8848 return true;
8849
8850 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
8851 if (READ_ONCE(rq->avg_irq.util_avg))
8852 return true;
8853 #endif
8854
8855 return false;
8856 }
8857
8858 static inline void update_blocked_load_tick(struct rq *rq)
8859 {
8860 WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
8861 }
8862
8863 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
8864 {
8865 if (!has_blocked)
8866 rq->has_blocked_load = 0;
8867 }
8868 #else
8869 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
8870 static inline bool others_have_blocked(struct rq *rq) { return false; }
8871 static inline void update_blocked_load_tick(struct rq *rq) {}
8872 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
8873 #endif
8874
8875 static bool __update_blocked_others(struct rq *rq, bool *done)
8876 {
8877 const struct sched_class *curr_class;
8878 u64 now = rq_clock_pelt(rq);
8879 unsigned long thermal_pressure;
8880 bool decayed;
8881
8882 /*
8883 * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
8884 * DL and IRQ signals have been updated before updating CFS.
8885 */
8886 curr_class = rq->curr->sched_class;
8887
8888 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
8889
8890 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
8891 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
8892 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
8893 update_irq_load_avg(rq, 0);
8894
8895 if (others_have_blocked(rq))
8896 *done = false;
8897
8898 return decayed;
8899 }
8900
8901 #ifdef CONFIG_FAIR_GROUP_SCHED
8902
8903 static bool __update_blocked_fair(struct rq *rq, bool *done)
8904 {
8905 struct cfs_rq *cfs_rq, *pos;
8906 bool decayed = false;
8907 int cpu = cpu_of(rq);
8908
8909 /*
8910 * Iterates the task_group tree in a bottom up fashion, see
8911 * list_add_leaf_cfs_rq() for details.
8912 */
8913 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
8914 struct sched_entity *se;
8915
8916 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
8917 update_tg_load_avg(cfs_rq);
8918
8919 if (cfs_rq->nr_running == 0)
8920 update_idle_cfs_rq_clock_pelt(cfs_rq);
8921
8922 if (cfs_rq == &rq->cfs)
8923 decayed = true;
8924 }
8925
8926 /* Propagate pending load changes to the parent, if any: */
8927 se = cfs_rq->tg->se[cpu];
8928 if (se && !skip_blocked_update(se))
8929 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
8930
8931 /*
8932 * There can be a lot of idle CPU cgroups. Don't let fully
8933 * decayed cfs_rqs linger on the list.
8934 */
8935 if (cfs_rq_is_decayed(cfs_rq))
8936 list_del_leaf_cfs_rq(cfs_rq);
8937
8938 /* Don't need periodic decay once load/util_avg are null */
8939 if (cfs_rq_has_blocked(cfs_rq))
8940 *done = false;
8941 }
8942
8943 return decayed;
8944 }
8945
8946 /*
8947 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
8948 * This needs to be done in a top-down fashion because the load of a child
8949 * group is a fraction of its parents load.
8950 */
8951 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
8952 {
8953 struct rq *rq = rq_of(cfs_rq);
8954 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
8955 unsigned long now = jiffies;
8956 unsigned long load;
8957
8958 if (cfs_rq->last_h_load_update == now)
8959 return;
8960
8961 WRITE_ONCE(cfs_rq->h_load_next, NULL);
8962 for_each_sched_entity(se) {
8963 cfs_rq = cfs_rq_of(se);
8964 WRITE_ONCE(cfs_rq->h_load_next, se);
8965 if (cfs_rq->last_h_load_update == now)
8966 break;
8967 }
8968
8969 if (!se) {
8970 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
8971 cfs_rq->last_h_load_update = now;
8972 }
8973
8974 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
8975 load = cfs_rq->h_load;
8976 load = div64_ul(load * se->avg.load_avg,
8977 cfs_rq_load_avg(cfs_rq) + 1);
8978 cfs_rq = group_cfs_rq(se);
8979 cfs_rq->h_load = load;
8980 cfs_rq->last_h_load_update = now;
8981 }
8982 }
8983
8984 static unsigned long task_h_load(struct task_struct *p)
8985 {
8986 struct cfs_rq *cfs_rq = task_cfs_rq(p);
8987
8988 update_cfs_rq_h_load(cfs_rq);
8989 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
8990 cfs_rq_load_avg(cfs_rq) + 1);
8991 }
8992 #else
8993 static bool __update_blocked_fair(struct rq *rq, bool *done)
8994 {
8995 struct cfs_rq *cfs_rq = &rq->cfs;
8996 bool decayed;
8997
8998 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
8999 if (cfs_rq_has_blocked(cfs_rq))
9000 *done = false;
9001
9002 return decayed;
9003 }
9004
9005 static unsigned long task_h_load(struct task_struct *p)
9006 {
9007 return p->se.avg.load_avg;
9008 }
9009 #endif
9010
9011 static void update_blocked_averages(int cpu)
9012 {
9013 bool decayed = false, done = true;
9014 struct rq *rq = cpu_rq(cpu);
9015 struct rq_flags rf;
9016
9017 rq_lock_irqsave(rq, &rf);
9018 update_blocked_load_tick(rq);
9019 update_rq_clock(rq);
9020
9021 decayed |= __update_blocked_others(rq, &done);
9022 decayed |= __update_blocked_fair(rq, &done);
9023
9024 update_blocked_load_status(rq, !done);
9025 if (decayed)
9026 cpufreq_update_util(rq, 0);
9027 rq_unlock_irqrestore(rq, &rf);
9028 }
9029
9030 /********** Helpers for find_busiest_group ************************/
9031
9032 /*
9033 * sg_lb_stats - stats of a sched_group required for load_balancing
9034 */
9035 struct sg_lb_stats {
9036 unsigned long avg_load; /*Avg load across the CPUs of the group */
9037 unsigned long group_load; /* Total load over the CPUs of the group */
9038 unsigned long group_capacity;
9039 unsigned long group_util; /* Total utilization over the CPUs of the group */
9040 unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
9041 unsigned int sum_nr_running; /* Nr of tasks running in the group */
9042 unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
9043 unsigned int idle_cpus;
9044 unsigned int group_weight;
9045 enum group_type group_type;
9046 unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
9047 unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
9048 #ifdef CONFIG_NUMA_BALANCING
9049 unsigned int nr_numa_running;
9050 unsigned int nr_preferred_running;
9051 #endif
9052 };
9053
9054 /*
9055 * sd_lb_stats - Structure to store the statistics of a sched_domain
9056 * during load balancing.
9057 */
9058 struct sd_lb_stats {
9059 struct sched_group *busiest; /* Busiest group in this sd */
9060 struct sched_group *local; /* Local group in this sd */
9061 unsigned long total_load; /* Total load of all groups in sd */
9062 unsigned long total_capacity; /* Total capacity of all groups in sd */
9063 unsigned long avg_load; /* Average load across all groups in sd */
9064 unsigned int prefer_sibling; /* tasks should go to sibling first */
9065
9066 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
9067 struct sg_lb_stats local_stat; /* Statistics of the local group */
9068 };
9069
9070 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
9071 {
9072 /*
9073 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
9074 * local_stat because update_sg_lb_stats() does a full clear/assignment.
9075 * We must however set busiest_stat::group_type and
9076 * busiest_stat::idle_cpus to the worst busiest group because
9077 * update_sd_pick_busiest() reads these before assignment.
9078 */
9079 *sds = (struct sd_lb_stats){
9080 .busiest = NULL,
9081 .local = NULL,
9082 .total_load = 0UL,
9083 .total_capacity = 0UL,
9084 .busiest_stat = {
9085 .idle_cpus = UINT_MAX,
9086 .group_type = group_has_spare,
9087 },
9088 };
9089 }
9090
9091 static unsigned long scale_rt_capacity(int cpu)
9092 {
9093 struct rq *rq = cpu_rq(cpu);
9094 unsigned long max = arch_scale_cpu_capacity(cpu);
9095 unsigned long used, free;
9096 unsigned long irq;
9097
9098 irq = cpu_util_irq(rq);
9099
9100 if (unlikely(irq >= max))
9101 return 1;
9102
9103 /*
9104 * avg_rt.util_avg and avg_dl.util_avg track binary signals
9105 * (running and not running) with weights 0 and 1024 respectively.
9106 * avg_thermal.load_avg tracks thermal pressure and the weighted
9107 * average uses the actual delta max capacity(load).
9108 */
9109 used = READ_ONCE(rq->avg_rt.util_avg);
9110 used += READ_ONCE(rq->avg_dl.util_avg);
9111 used += thermal_load_avg(rq);
9112
9113 if (unlikely(used >= max))
9114 return 1;
9115
9116 free = max - used;
9117
9118 return scale_irq_capacity(free, irq, max);
9119 }
9120
9121 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
9122 {
9123 unsigned long capacity = scale_rt_capacity(cpu);
9124 struct sched_group *sdg = sd->groups;
9125
9126 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
9127
9128 if (!capacity)
9129 capacity = 1;
9130
9131 cpu_rq(cpu)->cpu_capacity = capacity;
9132 trace_sched_cpu_capacity_tp(cpu_rq(cpu));
9133
9134 sdg->sgc->capacity = capacity;
9135 sdg->sgc->min_capacity = capacity;
9136 sdg->sgc->max_capacity = capacity;
9137 }
9138
9139 void update_group_capacity(struct sched_domain *sd, int cpu)
9140 {
9141 struct sched_domain *child = sd->child;
9142 struct sched_group *group, *sdg = sd->groups;
9143 unsigned long capacity, min_capacity, max_capacity;
9144 unsigned long interval;
9145
9146 interval = msecs_to_jiffies(sd->balance_interval);
9147 interval = clamp(interval, 1UL, max_load_balance_interval);
9148 sdg->sgc->next_update = jiffies + interval;
9149
9150 if (!child) {
9151 update_cpu_capacity(sd, cpu);
9152 return;
9153 }
9154
9155 capacity = 0;
9156 min_capacity = ULONG_MAX;
9157 max_capacity = 0;
9158
9159 if (child->flags & SD_OVERLAP) {
9160 /*
9161 * SD_OVERLAP domains cannot assume that child groups
9162 * span the current group.
9163 */
9164
9165 for_each_cpu(cpu, sched_group_span(sdg)) {
9166 unsigned long cpu_cap = capacity_of(cpu);
9167
9168 capacity += cpu_cap;
9169 min_capacity = min(cpu_cap, min_capacity);
9170 max_capacity = max(cpu_cap, max_capacity);
9171 }
9172 } else {
9173 /*
9174 * !SD_OVERLAP domains can assume that child groups
9175 * span the current group.
9176 */
9177
9178 group = child->groups;
9179 do {
9180 struct sched_group_capacity *sgc = group->sgc;
9181
9182 capacity += sgc->capacity;
9183 min_capacity = min(sgc->min_capacity, min_capacity);
9184 max_capacity = max(sgc->max_capacity, max_capacity);
9185 group = group->next;
9186 } while (group != child->groups);
9187 }
9188
9189 sdg->sgc->capacity = capacity;
9190 sdg->sgc->min_capacity = min_capacity;
9191 sdg->sgc->max_capacity = max_capacity;
9192 }
9193
9194 /*
9195 * Check whether the capacity of the rq has been noticeably reduced by side
9196 * activity. The imbalance_pct is used for the threshold.
9197 * Return true is the capacity is reduced
9198 */
9199 static inline int
9200 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
9201 {
9202 return ((rq->cpu_capacity * sd->imbalance_pct) <
9203 (rq->cpu_capacity_orig * 100));
9204 }
9205
9206 /*
9207 * Check whether a rq has a misfit task and if it looks like we can actually
9208 * help that task: we can migrate the task to a CPU of higher capacity, or
9209 * the task's current CPU is heavily pressured.
9210 */
9211 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
9212 {
9213 return rq->misfit_task_load &&
9214 (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
9215 check_cpu_capacity(rq, sd));
9216 }
9217
9218 /*
9219 * Group imbalance indicates (and tries to solve) the problem where balancing
9220 * groups is inadequate due to ->cpus_ptr constraints.
9221 *
9222 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
9223 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
9224 * Something like:
9225 *
9226 * { 0 1 2 3 } { 4 5 6 7 }
9227 * * * * *
9228 *
9229 * If we were to balance group-wise we'd place two tasks in the first group and
9230 * two tasks in the second group. Clearly this is undesired as it will overload
9231 * cpu 3 and leave one of the CPUs in the second group unused.
9232 *
9233 * The current solution to this issue is detecting the skew in the first group
9234 * by noticing the lower domain failed to reach balance and had difficulty
9235 * moving tasks due to affinity constraints.
9236 *
9237 * When this is so detected; this group becomes a candidate for busiest; see
9238 * update_sd_pick_busiest(). And calculate_imbalance() and
9239 * find_busiest_group() avoid some of the usual balance conditions to allow it
9240 * to create an effective group imbalance.
9241 *
9242 * This is a somewhat tricky proposition since the next run might not find the
9243 * group imbalance and decide the groups need to be balanced again. A most
9244 * subtle and fragile situation.
9245 */
9246
9247 static inline int sg_imbalanced(struct sched_group *group)
9248 {
9249 return group->sgc->imbalance;
9250 }
9251
9252 /*
9253 * group_has_capacity returns true if the group has spare capacity that could
9254 * be used by some tasks.
9255 * We consider that a group has spare capacity if the number of task is
9256 * smaller than the number of CPUs or if the utilization is lower than the
9257 * available capacity for CFS tasks.
9258 * For the latter, we use a threshold to stabilize the state, to take into
9259 * account the variance of the tasks' load and to return true if the available
9260 * capacity in meaningful for the load balancer.
9261 * As an example, an available capacity of 1% can appear but it doesn't make
9262 * any benefit for the load balance.
9263 */
9264 static inline bool
9265 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
9266 {
9267 if (sgs->sum_nr_running < sgs->group_weight)
9268 return true;
9269
9270 if ((sgs->group_capacity * imbalance_pct) <
9271 (sgs->group_runnable * 100))
9272 return false;
9273
9274 if ((sgs->group_capacity * 100) >
9275 (sgs->group_util * imbalance_pct))
9276 return true;
9277
9278 return false;
9279 }
9280
9281 /*
9282 * group_is_overloaded returns true if the group has more tasks than it can
9283 * handle.
9284 * group_is_overloaded is not equals to !group_has_capacity because a group
9285 * with the exact right number of tasks, has no more spare capacity but is not
9286 * overloaded so both group_has_capacity and group_is_overloaded return
9287 * false.
9288 */
9289 static inline bool
9290 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
9291 {
9292 if (sgs->sum_nr_running <= sgs->group_weight)
9293 return false;
9294
9295 if ((sgs->group_capacity * 100) <
9296 (sgs->group_util * imbalance_pct))
9297 return true;
9298
9299 if ((sgs->group_capacity * imbalance_pct) <
9300 (sgs->group_runnable * 100))
9301 return true;
9302
9303 return false;
9304 }
9305
9306 static inline enum
9307 group_type group_classify(unsigned int imbalance_pct,
9308 struct sched_group *group,
9309 struct sg_lb_stats *sgs)
9310 {
9311 if (group_is_overloaded(imbalance_pct, sgs))
9312 return group_overloaded;
9313
9314 if (sg_imbalanced(group))
9315 return group_imbalanced;
9316
9317 if (sgs->group_asym_packing)
9318 return group_asym_packing;
9319
9320 if (sgs->group_misfit_task_load)
9321 return group_misfit_task;
9322
9323 if (!group_has_capacity(imbalance_pct, sgs))
9324 return group_fully_busy;
9325
9326 return group_has_spare;
9327 }
9328
9329 /**
9330 * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks
9331 * @dst_cpu: Destination CPU of the load balancing
9332 * @sds: Load-balancing data with statistics of the local group
9333 * @sgs: Load-balancing statistics of the candidate busiest group
9334 * @sg: The candidate busiest group
9335 *
9336 * Check the state of the SMT siblings of both @sds::local and @sg and decide
9337 * if @dst_cpu can pull tasks.
9338 *
9339 * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of
9340 * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks
9341 * only if @dst_cpu has higher priority.
9342 *
9343 * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more
9344 * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority.
9345 * Bigger imbalances in the number of busy CPUs will be dealt with in
9346 * update_sd_pick_busiest().
9347 *
9348 * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
9349 * of @dst_cpu are idle and @sg has lower priority.
9350 *
9351 * Return: true if @dst_cpu can pull tasks, false otherwise.
9352 */
9353 static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
9354 struct sg_lb_stats *sgs,
9355 struct sched_group *sg)
9356 {
9357 #ifdef CONFIG_SCHED_SMT
9358 bool local_is_smt, sg_is_smt;
9359 int sg_busy_cpus;
9360
9361 local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY;
9362 sg_is_smt = sg->flags & SD_SHARE_CPUCAPACITY;
9363
9364 sg_busy_cpus = sgs->group_weight - sgs->idle_cpus;
9365
9366 if (!local_is_smt) {
9367 /*
9368 * If we are here, @dst_cpu is idle and does not have SMT
9369 * siblings. Pull tasks if candidate group has two or more
9370 * busy CPUs.
9371 */
9372 if (sg_busy_cpus >= 2) /* implies sg_is_smt */
9373 return true;
9374
9375 /*
9376 * @dst_cpu does not have SMT siblings. @sg may have SMT
9377 * siblings and only one is busy. In such case, @dst_cpu
9378 * can help if it has higher priority and is idle (i.e.,
9379 * it has no running tasks).
9380 */
9381 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
9382 }
9383
9384 /* @dst_cpu has SMT siblings. */
9385
9386 if (sg_is_smt) {
9387 int local_busy_cpus = sds->local->group_weight -
9388 sds->local_stat.idle_cpus;
9389 int busy_cpus_delta = sg_busy_cpus - local_busy_cpus;
9390
9391 if (busy_cpus_delta == 1)
9392 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
9393
9394 return false;
9395 }
9396
9397 /*
9398 * @sg does not have SMT siblings. Ensure that @sds::local does not end
9399 * up with more than one busy SMT sibling and only pull tasks if there
9400 * are not busy CPUs (i.e., no CPU has running tasks).
9401 */
9402 if (!sds->local_stat.sum_nr_running)
9403 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
9404
9405 return false;
9406 #else
9407 /* Always return false so that callers deal with non-SMT cases. */
9408 return false;
9409 #endif
9410 }
9411
9412 static inline bool
9413 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
9414 struct sched_group *group)
9415 {
9416 /* Only do SMT checks if either local or candidate have SMT siblings */
9417 if ((sds->local->flags & SD_SHARE_CPUCAPACITY) ||
9418 (group->flags & SD_SHARE_CPUCAPACITY))
9419 return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group);
9420
9421 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
9422 }
9423
9424 static inline bool
9425 sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
9426 {
9427 /*
9428 * When there is more than 1 task, the group_overloaded case already
9429 * takes care of cpu with reduced capacity
9430 */
9431 if (rq->cfs.h_nr_running != 1)
9432 return false;
9433
9434 return check_cpu_capacity(rq, sd);
9435 }
9436
9437 /**
9438 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
9439 * @env: The load balancing environment.
9440 * @sds: Load-balancing data with statistics of the local group.
9441 * @group: sched_group whose statistics are to be updated.
9442 * @sgs: variable to hold the statistics for this group.
9443 * @sg_status: Holds flag indicating the status of the sched_group
9444 */
9445 static inline void update_sg_lb_stats(struct lb_env *env,
9446 struct sd_lb_stats *sds,
9447 struct sched_group *group,
9448 struct sg_lb_stats *sgs,
9449 int *sg_status)
9450 {
9451 int i, nr_running, local_group;
9452
9453 memset(sgs, 0, sizeof(*sgs));
9454
9455 local_group = group == sds->local;
9456
9457 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
9458 struct rq *rq = cpu_rq(i);
9459 unsigned long load = cpu_load(rq);
9460
9461 sgs->group_load += load;
9462 sgs->group_util += cpu_util_cfs(i);
9463 sgs->group_runnable += cpu_runnable(rq);
9464 sgs->sum_h_nr_running += rq->cfs.h_nr_running;
9465
9466 nr_running = rq->nr_running;
9467 sgs->sum_nr_running += nr_running;
9468
9469 if (nr_running > 1)
9470 *sg_status |= SG_OVERLOAD;
9471
9472 if (cpu_overutilized(i))
9473 *sg_status |= SG_OVERUTILIZED;
9474
9475 #ifdef CONFIG_NUMA_BALANCING
9476 sgs->nr_numa_running += rq->nr_numa_running;
9477 sgs->nr_preferred_running += rq->nr_preferred_running;
9478 #endif
9479 /*
9480 * No need to call idle_cpu() if nr_running is not 0
9481 */
9482 if (!nr_running && idle_cpu(i)) {
9483 sgs->idle_cpus++;
9484 /* Idle cpu can't have misfit task */
9485 continue;
9486 }
9487
9488 if (local_group)
9489 continue;
9490
9491 if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
9492 /* Check for a misfit task on the cpu */
9493 if (sgs->group_misfit_task_load < rq->misfit_task_load) {
9494 sgs->group_misfit_task_load = rq->misfit_task_load;
9495 *sg_status |= SG_OVERLOAD;
9496 }
9497 } else if ((env->idle != CPU_NOT_IDLE) &&
9498 sched_reduced_capacity(rq, env->sd)) {
9499 /* Check for a task running on a CPU with reduced capacity */
9500 if (sgs->group_misfit_task_load < load)
9501 sgs->group_misfit_task_load = load;
9502 }
9503 }
9504
9505 sgs->group_capacity = group->sgc->capacity;
9506
9507 sgs->group_weight = group->group_weight;
9508
9509 /* Check if dst CPU is idle and preferred to this group */
9510 if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
9511 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
9512 sched_asym(env, sds, sgs, group)) {
9513 sgs->group_asym_packing = 1;
9514 }
9515
9516 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
9517
9518 /* Computing avg_load makes sense only when group is overloaded */
9519 if (sgs->group_type == group_overloaded)
9520 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
9521 sgs->group_capacity;
9522 }
9523
9524 /**
9525 * update_sd_pick_busiest - return 1 on busiest group
9526 * @env: The load balancing environment.
9527 * @sds: sched_domain statistics
9528 * @sg: sched_group candidate to be checked for being the busiest
9529 * @sgs: sched_group statistics
9530 *
9531 * Determine if @sg is a busier group than the previously selected
9532 * busiest group.
9533 *
9534 * Return: %true if @sg is a busier group than the previously selected
9535 * busiest group. %false otherwise.
9536 */
9537 static bool update_sd_pick_busiest(struct lb_env *env,
9538 struct sd_lb_stats *sds,
9539 struct sched_group *sg,
9540 struct sg_lb_stats *sgs)
9541 {
9542 struct sg_lb_stats *busiest = &sds->busiest_stat;
9543
9544 /* Make sure that there is at least one task to pull */
9545 if (!sgs->sum_h_nr_running)
9546 return false;
9547
9548 /*
9549 * Don't try to pull misfit tasks we can't help.
9550 * We can use max_capacity here as reduction in capacity on some
9551 * CPUs in the group should either be possible to resolve
9552 * internally or be covered by avg_load imbalance (eventually).
9553 */
9554 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
9555 (sgs->group_type == group_misfit_task) &&
9556 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
9557 sds->local_stat.group_type != group_has_spare))
9558 return false;
9559
9560 if (sgs->group_type > busiest->group_type)
9561 return true;
9562
9563 if (sgs->group_type < busiest->group_type)
9564 return false;
9565
9566 /*
9567 * The candidate and the current busiest group are the same type of
9568 * group. Let check which one is the busiest according to the type.
9569 */
9570
9571 switch (sgs->group_type) {
9572 case group_overloaded:
9573 /* Select the overloaded group with highest avg_load. */
9574 if (sgs->avg_load <= busiest->avg_load)
9575 return false;
9576 break;
9577
9578 case group_imbalanced:
9579 /*
9580 * Select the 1st imbalanced group as we don't have any way to
9581 * choose one more than another.
9582 */
9583 return false;
9584
9585 case group_asym_packing:
9586 /* Prefer to move from lowest priority CPU's work */
9587 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
9588 return false;
9589 break;
9590
9591 case group_misfit_task:
9592 /*
9593 * If we have more than one misfit sg go with the biggest
9594 * misfit.
9595 */
9596 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
9597 return false;
9598 break;
9599
9600 case group_fully_busy:
9601 /*
9602 * Select the fully busy group with highest avg_load. In
9603 * theory, there is no need to pull task from such kind of
9604 * group because tasks have all compute capacity that they need
9605 * but we can still improve the overall throughput by reducing
9606 * contention when accessing shared HW resources.
9607 *
9608 * XXX for now avg_load is not computed and always 0 so we
9609 * select the 1st one.
9610 */
9611 if (sgs->avg_load <= busiest->avg_load)
9612 return false;
9613 break;
9614
9615 case group_has_spare:
9616 /*
9617 * Select not overloaded group with lowest number of idle cpus
9618 * and highest number of running tasks. We could also compare
9619 * the spare capacity which is more stable but it can end up
9620 * that the group has less spare capacity but finally more idle
9621 * CPUs which means less opportunity to pull tasks.
9622 */
9623 if (sgs->idle_cpus > busiest->idle_cpus)
9624 return false;
9625 else if ((sgs->idle_cpus == busiest->idle_cpus) &&
9626 (sgs->sum_nr_running <= busiest->sum_nr_running))
9627 return false;
9628
9629 break;
9630 }
9631
9632 /*
9633 * Candidate sg has no more than one task per CPU and has higher
9634 * per-CPU capacity. Migrating tasks to less capable CPUs may harm
9635 * throughput. Maximize throughput, power/energy consequences are not
9636 * considered.
9637 */
9638 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
9639 (sgs->group_type <= group_fully_busy) &&
9640 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
9641 return false;
9642
9643 return true;
9644 }
9645
9646 #ifdef CONFIG_NUMA_BALANCING
9647 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
9648 {
9649 if (sgs->sum_h_nr_running > sgs->nr_numa_running)
9650 return regular;
9651 if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
9652 return remote;
9653 return all;
9654 }
9655
9656 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
9657 {
9658 if (rq->nr_running > rq->nr_numa_running)
9659 return regular;
9660 if (rq->nr_running > rq->nr_preferred_running)
9661 return remote;
9662 return all;
9663 }
9664 #else
9665 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
9666 {
9667 return all;
9668 }
9669
9670 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
9671 {
9672 return regular;
9673 }
9674 #endif /* CONFIG_NUMA_BALANCING */
9675
9676
9677 struct sg_lb_stats;
9678
9679 /*
9680 * task_running_on_cpu - return 1 if @p is running on @cpu.
9681 */
9682
9683 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
9684 {
9685 /* Task has no contribution or is new */
9686 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
9687 return 0;
9688
9689 if (task_on_rq_queued(p))
9690 return 1;
9691
9692 return 0;
9693 }
9694
9695 /**
9696 * idle_cpu_without - would a given CPU be idle without p ?
9697 * @cpu: the processor on which idleness is tested.
9698 * @p: task which should be ignored.
9699 *
9700 * Return: 1 if the CPU would be idle. 0 otherwise.
9701 */
9702 static int idle_cpu_without(int cpu, struct task_struct *p)
9703 {
9704 struct rq *rq = cpu_rq(cpu);
9705
9706 if (rq->curr != rq->idle && rq->curr != p)
9707 return 0;
9708
9709 /*
9710 * rq->nr_running can't be used but an updated version without the
9711 * impact of p on cpu must be used instead. The updated nr_running
9712 * be computed and tested before calling idle_cpu_without().
9713 */
9714
9715 #ifdef CONFIG_SMP
9716 if (rq->ttwu_pending)
9717 return 0;
9718 #endif
9719
9720 return 1;
9721 }
9722
9723 /*
9724 * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
9725 * @sd: The sched_domain level to look for idlest group.
9726 * @group: sched_group whose statistics are to be updated.
9727 * @sgs: variable to hold the statistics for this group.
9728 * @p: The task for which we look for the idlest group/CPU.
9729 */
9730 static inline void update_sg_wakeup_stats(struct sched_domain *sd,
9731 struct sched_group *group,
9732 struct sg_lb_stats *sgs,
9733 struct task_struct *p)
9734 {
9735 int i, nr_running;
9736
9737 memset(sgs, 0, sizeof(*sgs));
9738
9739 /* Assume that task can't fit any CPU of the group */
9740 if (sd->flags & SD_ASYM_CPUCAPACITY)
9741 sgs->group_misfit_task_load = 1;
9742
9743 for_each_cpu(i, sched_group_span(group)) {
9744 struct rq *rq = cpu_rq(i);
9745 unsigned int local;
9746
9747 sgs->group_load += cpu_load_without(rq, p);
9748 sgs->group_util += cpu_util_without(i, p);
9749 sgs->group_runnable += cpu_runnable_without(rq, p);
9750 local = task_running_on_cpu(i, p);
9751 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
9752
9753 nr_running = rq->nr_running - local;
9754 sgs->sum_nr_running += nr_running;
9755
9756 /*
9757 * No need to call idle_cpu_without() if nr_running is not 0
9758 */
9759 if (!nr_running && idle_cpu_without(i, p))
9760 sgs->idle_cpus++;
9761
9762 /* Check if task fits in the CPU */
9763 if (sd->flags & SD_ASYM_CPUCAPACITY &&
9764 sgs->group_misfit_task_load &&
9765 task_fits_cpu(p, i))
9766 sgs->group_misfit_task_load = 0;
9767
9768 }
9769
9770 sgs->group_capacity = group->sgc->capacity;
9771
9772 sgs->group_weight = group->group_weight;
9773
9774 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
9775
9776 /*
9777 * Computing avg_load makes sense only when group is fully busy or
9778 * overloaded
9779 */
9780 if (sgs->group_type == group_fully_busy ||
9781 sgs->group_type == group_overloaded)
9782 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
9783 sgs->group_capacity;
9784 }
9785
9786 static bool update_pick_idlest(struct sched_group *idlest,
9787 struct sg_lb_stats *idlest_sgs,
9788 struct sched_group *group,
9789 struct sg_lb_stats *sgs)
9790 {
9791 if (sgs->group_type < idlest_sgs->group_type)
9792 return true;
9793
9794 if (sgs->group_type > idlest_sgs->group_type)
9795 return false;
9796
9797 /*
9798 * The candidate and the current idlest group are the same type of
9799 * group. Let check which one is the idlest according to the type.
9800 */
9801
9802 switch (sgs->group_type) {
9803 case group_overloaded:
9804 case group_fully_busy:
9805 /* Select the group with lowest avg_load. */
9806 if (idlest_sgs->avg_load <= sgs->avg_load)
9807 return false;
9808 break;
9809
9810 case group_imbalanced:
9811 case group_asym_packing:
9812 /* Those types are not used in the slow wakeup path */
9813 return false;
9814
9815 case group_misfit_task:
9816 /* Select group with the highest max capacity */
9817 if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
9818 return false;
9819 break;
9820
9821 case group_has_spare:
9822 /* Select group with most idle CPUs */
9823 if (idlest_sgs->idle_cpus > sgs->idle_cpus)
9824 return false;
9825
9826 /* Select group with lowest group_util */
9827 if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
9828 idlest_sgs->group_util <= sgs->group_util)
9829 return false;
9830
9831 break;
9832 }
9833
9834 return true;
9835 }
9836
9837 /*
9838 * find_idlest_group() finds and returns the least busy CPU group within the
9839 * domain.
9840 *
9841 * Assumes p is allowed on at least one CPU in sd.
9842 */
9843 static struct sched_group *
9844 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
9845 {
9846 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
9847 struct sg_lb_stats local_sgs, tmp_sgs;
9848 struct sg_lb_stats *sgs;
9849 unsigned long imbalance;
9850 struct sg_lb_stats idlest_sgs = {
9851 .avg_load = UINT_MAX,
9852 .group_type = group_overloaded,
9853 };
9854
9855 do {
9856 int local_group;
9857
9858 /* Skip over this group if it has no CPUs allowed */
9859 if (!cpumask_intersects(sched_group_span(group),
9860 p->cpus_ptr))
9861 continue;
9862
9863 /* Skip over this group if no cookie matched */
9864 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
9865 continue;
9866
9867 local_group = cpumask_test_cpu(this_cpu,
9868 sched_group_span(group));
9869
9870 if (local_group) {
9871 sgs = &local_sgs;
9872 local = group;
9873 } else {
9874 sgs = &tmp_sgs;
9875 }
9876
9877 update_sg_wakeup_stats(sd, group, sgs, p);
9878
9879 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
9880 idlest = group;
9881 idlest_sgs = *sgs;
9882 }
9883
9884 } while (group = group->next, group != sd->groups);
9885
9886
9887 /* There is no idlest group to push tasks to */
9888 if (!idlest)
9889 return NULL;
9890
9891 /* The local group has been skipped because of CPU affinity */
9892 if (!local)
9893 return idlest;
9894
9895 /*
9896 * If the local group is idler than the selected idlest group
9897 * don't try and push the task.
9898 */
9899 if (local_sgs.group_type < idlest_sgs.group_type)
9900 return NULL;
9901
9902 /*
9903 * If the local group is busier than the selected idlest group
9904 * try and push the task.
9905 */
9906 if (local_sgs.group_type > idlest_sgs.group_type)
9907 return idlest;
9908
9909 switch (local_sgs.group_type) {
9910 case group_overloaded:
9911 case group_fully_busy:
9912
9913 /* Calculate allowed imbalance based on load */
9914 imbalance = scale_load_down(NICE_0_LOAD) *
9915 (sd->imbalance_pct-100) / 100;
9916
9917 /*
9918 * When comparing groups across NUMA domains, it's possible for
9919 * the local domain to be very lightly loaded relative to the
9920 * remote domains but "imbalance" skews the comparison making
9921 * remote CPUs look much more favourable. When considering
9922 * cross-domain, add imbalance to the load on the remote node
9923 * and consider staying local.
9924 */
9925
9926 if ((sd->flags & SD_NUMA) &&
9927 ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
9928 return NULL;
9929
9930 /*
9931 * If the local group is less loaded than the selected
9932 * idlest group don't try and push any tasks.
9933 */
9934 if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
9935 return NULL;
9936
9937 if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
9938 return NULL;
9939 break;
9940
9941 case group_imbalanced:
9942 case group_asym_packing:
9943 /* Those type are not used in the slow wakeup path */
9944 return NULL;
9945
9946 case group_misfit_task:
9947 /* Select group with the highest max capacity */
9948 if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
9949 return NULL;
9950 break;
9951
9952 case group_has_spare:
9953 #ifdef CONFIG_NUMA
9954 if (sd->flags & SD_NUMA) {
9955 int imb_numa_nr = sd->imb_numa_nr;
9956 #ifdef CONFIG_NUMA_BALANCING
9957 int idlest_cpu;
9958 /*
9959 * If there is spare capacity at NUMA, try to select
9960 * the preferred node
9961 */
9962 if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
9963 return NULL;
9964
9965 idlest_cpu = cpumask_first(sched_group_span(idlest));
9966 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
9967 return idlest;
9968 #endif /* CONFIG_NUMA_BALANCING */
9969 /*
9970 * Otherwise, keep the task close to the wakeup source
9971 * and improve locality if the number of running tasks
9972 * would remain below threshold where an imbalance is
9973 * allowed while accounting for the possibility the
9974 * task is pinned to a subset of CPUs. If there is a
9975 * real need of migration, periodic load balance will
9976 * take care of it.
9977 */
9978 if (p->nr_cpus_allowed != NR_CPUS) {
9979 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
9980
9981 cpumask_and(cpus, sched_group_span(local), p->cpus_ptr);
9982 imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr);
9983 }
9984
9985 imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);
9986 if (!adjust_numa_imbalance(imbalance,
9987 local_sgs.sum_nr_running + 1,
9988 imb_numa_nr)) {
9989 return NULL;
9990 }
9991 }
9992 #endif /* CONFIG_NUMA */
9993
9994 /*
9995 * Select group with highest number of idle CPUs. We could also
9996 * compare the utilization which is more stable but it can end
9997 * up that the group has less spare capacity but finally more
9998 * idle CPUs which means more opportunity to run task.
9999 */
10000 if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
10001 return NULL;
10002 break;
10003 }
10004
10005 return idlest;
10006 }
10007
10008 static void update_idle_cpu_scan(struct lb_env *env,
10009 unsigned long sum_util)
10010 {
10011 struct sched_domain_shared *sd_share;
10012 int llc_weight, pct;
10013 u64 x, y, tmp;
10014 /*
10015 * Update the number of CPUs to scan in LLC domain, which could
10016 * be used as a hint in select_idle_cpu(). The update of sd_share
10017 * could be expensive because it is within a shared cache line.
10018 * So the write of this hint only occurs during periodic load
10019 * balancing, rather than CPU_NEWLY_IDLE, because the latter
10020 * can fire way more frequently than the former.
10021 */
10022 if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
10023 return;
10024
10025 llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
10026 if (env->sd->span_weight != llc_weight)
10027 return;
10028
10029 sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
10030 if (!sd_share)
10031 return;
10032
10033 /*
10034 * The number of CPUs to search drops as sum_util increases, when
10035 * sum_util hits 85% or above, the scan stops.
10036 * The reason to choose 85% as the threshold is because this is the
10037 * imbalance_pct(117) when a LLC sched group is overloaded.
10038 *
10039 * let y = SCHED_CAPACITY_SCALE - p * x^2 [1]
10040 * and y'= y / SCHED_CAPACITY_SCALE
10041 *
10042 * x is the ratio of sum_util compared to the CPU capacity:
10043 * x = sum_util / (llc_weight * SCHED_CAPACITY_SCALE)
10044 * y' is the ratio of CPUs to be scanned in the LLC domain,
10045 * and the number of CPUs to scan is calculated by:
10046 *
10047 * nr_scan = llc_weight * y' [2]
10048 *
10049 * When x hits the threshold of overloaded, AKA, when
10050 * x = 100 / pct, y drops to 0. According to [1],
10051 * p should be SCHED_CAPACITY_SCALE * pct^2 / 10000
10052 *
10053 * Scale x by SCHED_CAPACITY_SCALE:
10054 * x' = sum_util / llc_weight; [3]
10055 *
10056 * and finally [1] becomes:
10057 * y = SCHED_CAPACITY_SCALE -
10058 * x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE) [4]
10059 *
10060 */
10061 /* equation [3] */
10062 x = sum_util;
10063 do_div(x, llc_weight);
10064
10065 /* equation [4] */
10066 pct = env->sd->imbalance_pct;
10067 tmp = x * x * pct * pct;
10068 do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
10069 tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
10070 y = SCHED_CAPACITY_SCALE - tmp;
10071
10072 /* equation [2] */
10073 y *= llc_weight;
10074 do_div(y, SCHED_CAPACITY_SCALE);
10075 if ((int)y != sd_share->nr_idle_scan)
10076 WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
10077 }
10078
10079 /**
10080 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
10081 * @env: The load balancing environment.
10082 * @sds: variable to hold the statistics for this sched_domain.
10083 */
10084
10085 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
10086 {
10087 struct sched_domain *child = env->sd->child;
10088 struct sched_group *sg = env->sd->groups;
10089 struct sg_lb_stats *local = &sds->local_stat;
10090 struct sg_lb_stats tmp_sgs;
10091 unsigned long sum_util = 0;
10092 int sg_status = 0;
10093
10094 do {
10095 struct sg_lb_stats *sgs = &tmp_sgs;
10096 int local_group;
10097
10098 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
10099 if (local_group) {
10100 sds->local = sg;
10101 sgs = local;
10102
10103 if (env->idle != CPU_NEWLY_IDLE ||
10104 time_after_eq(jiffies, sg->sgc->next_update))
10105 update_group_capacity(env->sd, env->dst_cpu);
10106 }
10107
10108 update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
10109
10110 if (local_group)
10111 goto next_group;
10112
10113
10114 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
10115 sds->busiest = sg;
10116 sds->busiest_stat = *sgs;
10117 }
10118
10119 next_group:
10120 /* Now, start updating sd_lb_stats */
10121 sds->total_load += sgs->group_load;
10122 sds->total_capacity += sgs->group_capacity;
10123
10124 sum_util += sgs->group_util;
10125 sg = sg->next;
10126 } while (sg != env->sd->groups);
10127
10128 /* Tag domain that child domain prefers tasks go to siblings first */
10129 sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
10130
10131
10132 if (env->sd->flags & SD_NUMA)
10133 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
10134
10135 if (!env->sd->parent) {
10136 struct root_domain *rd = env->dst_rq->rd;
10137
10138 /* update overload indicator if we are at root domain */
10139 WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
10140
10141 /* Update over-utilization (tipping point, U >= 0) indicator */
10142 WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
10143 trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
10144 } else if (sg_status & SG_OVERUTILIZED) {
10145 struct root_domain *rd = env->dst_rq->rd;
10146
10147 WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
10148 trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
10149 }
10150
10151 update_idle_cpu_scan(env, sum_util);
10152 }
10153
10154 /**
10155 * calculate_imbalance - Calculate the amount of imbalance present within the
10156 * groups of a given sched_domain during load balance.
10157 * @env: load balance environment
10158 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
10159 */
10160 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
10161 {
10162 struct sg_lb_stats *local, *busiest;
10163
10164 local = &sds->local_stat;
10165 busiest = &sds->busiest_stat;
10166
10167 if (busiest->group_type == group_misfit_task) {
10168 if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
10169 /* Set imbalance to allow misfit tasks to be balanced. */
10170 env->migration_type = migrate_misfit;
10171 env->imbalance = 1;
10172 } else {
10173 /*
10174 * Set load imbalance to allow moving task from cpu
10175 * with reduced capacity.
10176 */
10177 env->migration_type = migrate_load;
10178 env->imbalance = busiest->group_misfit_task_load;
10179 }
10180 return;
10181 }
10182
10183 if (busiest->group_type == group_asym_packing) {
10184 /*
10185 * In case of asym capacity, we will try to migrate all load to
10186 * the preferred CPU.
10187 */
10188 env->migration_type = migrate_task;
10189 env->imbalance = busiest->sum_h_nr_running;
10190 return;
10191 }
10192
10193 if (busiest->group_type == group_imbalanced) {
10194 /*
10195 * In the group_imb case we cannot rely on group-wide averages
10196 * to ensure CPU-load equilibrium, try to move any task to fix
10197 * the imbalance. The next load balance will take care of
10198 * balancing back the system.
10199 */
10200 env->migration_type = migrate_task;
10201 env->imbalance = 1;
10202 return;
10203 }
10204
10205 /*
10206 * Try to use spare capacity of local group without overloading it or
10207 * emptying busiest.
10208 */
10209 if (local->group_type == group_has_spare) {
10210 if ((busiest->group_type > group_fully_busy) &&
10211 !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
10212 /*
10213 * If busiest is overloaded, try to fill spare
10214 * capacity. This might end up creating spare capacity
10215 * in busiest or busiest still being overloaded but
10216 * there is no simple way to directly compute the
10217 * amount of load to migrate in order to balance the
10218 * system.
10219 */
10220 env->migration_type = migrate_util;
10221 env->imbalance = max(local->group_capacity, local->group_util) -
10222 local->group_util;
10223
10224 /*
10225 * In some cases, the group's utilization is max or even
10226 * higher than capacity because of migrations but the
10227 * local CPU is (newly) idle. There is at least one
10228 * waiting task in this overloaded busiest group. Let's
10229 * try to pull it.
10230 */
10231 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
10232 env->migration_type = migrate_task;
10233 env->imbalance = 1;
10234 }
10235
10236 return;
10237 }
10238
10239 if (busiest->group_weight == 1 || sds->prefer_sibling) {
10240 unsigned int nr_diff = busiest->sum_nr_running;
10241 /*
10242 * When prefer sibling, evenly spread running tasks on
10243 * groups.
10244 */
10245 env->migration_type = migrate_task;
10246 lsub_positive(&nr_diff, local->sum_nr_running);
10247 env->imbalance = nr_diff;
10248 } else {
10249
10250 /*
10251 * If there is no overload, we just want to even the number of
10252 * idle cpus.
10253 */
10254 env->migration_type = migrate_task;
10255 env->imbalance = max_t(long, 0,
10256 (local->idle_cpus - busiest->idle_cpus));
10257 }
10258
10259 #ifdef CONFIG_NUMA
10260 /* Consider allowing a small imbalance between NUMA groups */
10261 if (env->sd->flags & SD_NUMA) {
10262 env->imbalance = adjust_numa_imbalance(env->imbalance,
10263 local->sum_nr_running + 1,
10264 env->sd->imb_numa_nr);
10265 }
10266 #endif
10267
10268 /* Number of tasks to move to restore balance */
10269 env->imbalance >>= 1;
10270
10271 return;
10272 }
10273
10274 /*
10275 * Local is fully busy but has to take more load to relieve the
10276 * busiest group
10277 */
10278 if (local->group_type < group_overloaded) {
10279 /*
10280 * Local will become overloaded so the avg_load metrics are
10281 * finally needed.
10282 */
10283
10284 local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
10285 local->group_capacity;
10286
10287 /*
10288 * If the local group is more loaded than the selected
10289 * busiest group don't try to pull any tasks.
10290 */
10291 if (local->avg_load >= busiest->avg_load) {
10292 env->imbalance = 0;
10293 return;
10294 }
10295
10296 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
10297 sds->total_capacity;
10298
10299 /*
10300 * If the local group is more loaded than the average system
10301 * load, don't try to pull any tasks.
10302 */
10303 if (local->avg_load >= sds->avg_load) {
10304 env->imbalance = 0;
10305 return;
10306 }
10307
10308 }
10309
10310 /*
10311 * Both group are or will become overloaded and we're trying to get all
10312 * the CPUs to the average_load, so we don't want to push ourselves
10313 * above the average load, nor do we wish to reduce the max loaded CPU
10314 * below the average load. At the same time, we also don't want to
10315 * reduce the group load below the group capacity. Thus we look for
10316 * the minimum possible imbalance.
10317 */
10318 env->migration_type = migrate_load;
10319 env->imbalance = min(
10320 (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
10321 (sds->avg_load - local->avg_load) * local->group_capacity
10322 ) / SCHED_CAPACITY_SCALE;
10323 }
10324
10325 /******* find_busiest_group() helpers end here *********************/
10326
10327 /*
10328 * Decision matrix according to the local and busiest group type:
10329 *
10330 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
10331 * has_spare nr_idle balanced N/A N/A balanced balanced
10332 * fully_busy nr_idle nr_idle N/A N/A balanced balanced
10333 * misfit_task force N/A N/A N/A N/A N/A
10334 * asym_packing force force N/A N/A force force
10335 * imbalanced force force N/A N/A force force
10336 * overloaded force force N/A N/A force avg_load
10337 *
10338 * N/A : Not Applicable because already filtered while updating
10339 * statistics.
10340 * balanced : The system is balanced for these 2 groups.
10341 * force : Calculate the imbalance as load migration is probably needed.
10342 * avg_load : Only if imbalance is significant enough.
10343 * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite
10344 * different in groups.
10345 */
10346
10347 /**
10348 * find_busiest_group - Returns the busiest group within the sched_domain
10349 * if there is an imbalance.
10350 * @env: The load balancing environment.
10351 *
10352 * Also calculates the amount of runnable load which should be moved
10353 * to restore balance.
10354 *
10355 * Return: - The busiest group if imbalance exists.
10356 */
10357 static struct sched_group *find_busiest_group(struct lb_env *env)
10358 {
10359 struct sg_lb_stats *local, *busiest;
10360 struct sd_lb_stats sds;
10361
10362 init_sd_lb_stats(&sds);
10363
10364 /*
10365 * Compute the various statistics relevant for load balancing at
10366 * this level.
10367 */
10368 update_sd_lb_stats(env, &sds);
10369
10370 /* There is no busy sibling group to pull tasks from */
10371 if (!sds.busiest)
10372 goto out_balanced;
10373
10374 busiest = &sds.busiest_stat;
10375
10376 /* Misfit tasks should be dealt with regardless of the avg load */
10377 if (busiest->group_type == group_misfit_task)
10378 goto force_balance;
10379
10380 if (sched_energy_enabled()) {
10381 struct root_domain *rd = env->dst_rq->rd;
10382
10383 if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
10384 goto out_balanced;
10385 }
10386
10387 /* ASYM feature bypasses nice load balance check */
10388 if (busiest->group_type == group_asym_packing)
10389 goto force_balance;
10390
10391 /*
10392 * If the busiest group is imbalanced the below checks don't
10393 * work because they assume all things are equal, which typically
10394 * isn't true due to cpus_ptr constraints and the like.
10395 */
10396 if (busiest->group_type == group_imbalanced)
10397 goto force_balance;
10398
10399 local = &sds.local_stat;
10400 /*
10401 * If the local group is busier than the selected busiest group
10402 * don't try and pull any tasks.
10403 */
10404 if (local->group_type > busiest->group_type)
10405 goto out_balanced;
10406
10407 /*
10408 * When groups are overloaded, use the avg_load to ensure fairness
10409 * between tasks.
10410 */
10411 if (local->group_type == group_overloaded) {
10412 /*
10413 * If the local group is more loaded than the selected
10414 * busiest group don't try to pull any tasks.
10415 */
10416 if (local->avg_load >= busiest->avg_load)
10417 goto out_balanced;
10418
10419 /* XXX broken for overlapping NUMA groups */
10420 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
10421 sds.total_capacity;
10422
10423 /*
10424 * Don't pull any tasks if this group is already above the
10425 * domain average load.
10426 */
10427 if (local->avg_load >= sds.avg_load)
10428 goto out_balanced;
10429
10430 /*
10431 * If the busiest group is more loaded, use imbalance_pct to be
10432 * conservative.
10433 */
10434 if (100 * busiest->avg_load <=
10435 env->sd->imbalance_pct * local->avg_load)
10436 goto out_balanced;
10437 }
10438
10439 /* Try to move all excess tasks to child's sibling domain */
10440 if (sds.prefer_sibling && local->group_type == group_has_spare &&
10441 busiest->sum_nr_running > local->sum_nr_running + 1)
10442 goto force_balance;
10443
10444 if (busiest->group_type != group_overloaded) {
10445 if (env->idle == CPU_NOT_IDLE)
10446 /*
10447 * If the busiest group is not overloaded (and as a
10448 * result the local one too) but this CPU is already
10449 * busy, let another idle CPU try to pull task.
10450 */
10451 goto out_balanced;
10452
10453 if (busiest->group_weight > 1 &&
10454 local->idle_cpus <= (busiest->idle_cpus + 1))
10455 /*
10456 * If the busiest group is not overloaded
10457 * and there is no imbalance between this and busiest
10458 * group wrt idle CPUs, it is balanced. The imbalance
10459 * becomes significant if the diff is greater than 1
10460 * otherwise we might end up to just move the imbalance
10461 * on another group. Of course this applies only if
10462 * there is more than 1 CPU per group.
10463 */
10464 goto out_balanced;
10465
10466 if (busiest->sum_h_nr_running == 1)
10467 /*
10468 * busiest doesn't have any tasks waiting to run
10469 */
10470 goto out_balanced;
10471 }
10472
10473 force_balance:
10474 /* Looks like there is an imbalance. Compute it */
10475 calculate_imbalance(env, &sds);
10476 return env->imbalance ? sds.busiest : NULL;
10477
10478 out_balanced:
10479 env->imbalance = 0;
10480 return NULL;
10481 }
10482
10483 /*
10484 * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
10485 */
10486 static struct rq *find_busiest_queue(struct lb_env *env,
10487 struct sched_group *group)
10488 {
10489 struct rq *busiest = NULL, *rq;
10490 unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
10491 unsigned int busiest_nr = 0;
10492 int i;
10493
10494 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
10495 unsigned long capacity, load, util;
10496 unsigned int nr_running;
10497 enum fbq_type rt;
10498
10499 rq = cpu_rq(i);
10500 rt = fbq_classify_rq(rq);
10501
10502 /*
10503 * We classify groups/runqueues into three groups:
10504 * - regular: there are !numa tasks
10505 * - remote: there are numa tasks that run on the 'wrong' node
10506 * - all: there is no distinction
10507 *
10508 * In order to avoid migrating ideally placed numa tasks,
10509 * ignore those when there's better options.
10510 *
10511 * If we ignore the actual busiest queue to migrate another
10512 * task, the next balance pass can still reduce the busiest
10513 * queue by moving tasks around inside the node.
10514 *
10515 * If we cannot move enough load due to this classification
10516 * the next pass will adjust the group classification and
10517 * allow migration of more tasks.
10518 *
10519 * Both cases only affect the total convergence complexity.
10520 */
10521 if (rt > env->fbq_type)
10522 continue;
10523
10524 nr_running = rq->cfs.h_nr_running;
10525 if (!nr_running)
10526 continue;
10527
10528 capacity = capacity_of(i);
10529
10530 /*
10531 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
10532 * eventually lead to active_balancing high->low capacity.
10533 * Higher per-CPU capacity is considered better than balancing
10534 * average load.
10535 */
10536 if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
10537 !capacity_greater(capacity_of(env->dst_cpu), capacity) &&
10538 nr_running == 1)
10539 continue;
10540
10541 /* Make sure we only pull tasks from a CPU of lower priority */
10542 if ((env->sd->flags & SD_ASYM_PACKING) &&
10543 sched_asym_prefer(i, env->dst_cpu) &&
10544 nr_running == 1)
10545 continue;
10546
10547 switch (env->migration_type) {
10548 case migrate_load:
10549 /*
10550 * When comparing with load imbalance, use cpu_load()
10551 * which is not scaled with the CPU capacity.
10552 */
10553 load = cpu_load(rq);
10554
10555 if (nr_running == 1 && load > env->imbalance &&
10556 !check_cpu_capacity(rq, env->sd))
10557 break;
10558
10559 /*
10560 * For the load comparisons with the other CPUs,
10561 * consider the cpu_load() scaled with the CPU
10562 * capacity, so that the load can be moved away
10563 * from the CPU that is potentially running at a
10564 * lower capacity.
10565 *
10566 * Thus we're looking for max(load_i / capacity_i),
10567 * crosswise multiplication to rid ourselves of the
10568 * division works out to:
10569 * load_i * capacity_j > load_j * capacity_i;
10570 * where j is our previous maximum.
10571 */
10572 if (load * busiest_capacity > busiest_load * capacity) {
10573 busiest_load = load;
10574 busiest_capacity = capacity;
10575 busiest = rq;
10576 }
10577 break;
10578
10579 case migrate_util:
10580 util = cpu_util_cfs(i);
10581
10582 /*
10583 * Don't try to pull utilization from a CPU with one
10584 * running task. Whatever its utilization, we will fail
10585 * detach the task.
10586 */
10587 if (nr_running <= 1)
10588 continue;
10589
10590 if (busiest_util < util) {
10591 busiest_util = util;
10592 busiest = rq;
10593 }
10594 break;
10595
10596 case migrate_task:
10597 if (busiest_nr < nr_running) {
10598 busiest_nr = nr_running;
10599 busiest = rq;
10600 }
10601 break;
10602
10603 case migrate_misfit:
10604 /*
10605 * For ASYM_CPUCAPACITY domains with misfit tasks we
10606 * simply seek the "biggest" misfit task.
10607 */
10608 if (rq->misfit_task_load > busiest_load) {
10609 busiest_load = rq->misfit_task_load;
10610 busiest = rq;
10611 }
10612
10613 break;
10614
10615 }
10616 }
10617
10618 return busiest;
10619 }
10620
10621 /*
10622 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
10623 * so long as it is large enough.
10624 */
10625 #define MAX_PINNED_INTERVAL 512
10626
10627 static inline bool
10628 asym_active_balance(struct lb_env *env)
10629 {
10630 /*
10631 * ASYM_PACKING needs to force migrate tasks from busy but
10632 * lower priority CPUs in order to pack all tasks in the
10633 * highest priority CPUs.
10634 */
10635 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
10636 sched_asym_prefer(env->dst_cpu, env->src_cpu);
10637 }
10638
10639 static inline bool
10640 imbalanced_active_balance(struct lb_env *env)
10641 {
10642 struct sched_domain *sd = env->sd;
10643
10644 /*
10645 * The imbalanced case includes the case of pinned tasks preventing a fair
10646 * distribution of the load on the system but also the even distribution of the
10647 * threads on a system with spare capacity
10648 */
10649 if ((env->migration_type == migrate_task) &&
10650 (sd->nr_balance_failed > sd->cache_nice_tries+2))
10651 return 1;
10652
10653 return 0;
10654 }
10655
10656 static int need_active_balance(struct lb_env *env)
10657 {
10658 struct sched_domain *sd = env->sd;
10659
10660 if (asym_active_balance(env))
10661 return 1;
10662
10663 if (imbalanced_active_balance(env))
10664 return 1;
10665
10666 /*
10667 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
10668 * It's worth migrating the task if the src_cpu's capacity is reduced
10669 * because of other sched_class or IRQs if more capacity stays
10670 * available on dst_cpu.
10671 */
10672 if ((env->idle != CPU_NOT_IDLE) &&
10673 (env->src_rq->cfs.h_nr_running == 1)) {
10674 if ((check_cpu_capacity(env->src_rq, sd)) &&
10675 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
10676 return 1;
10677 }
10678
10679 if (env->migration_type == migrate_misfit)
10680 return 1;
10681
10682 return 0;
10683 }
10684
10685 static int active_load_balance_cpu_stop(void *data);
10686
10687 static int should_we_balance(struct lb_env *env)
10688 {
10689 struct sched_group *sg = env->sd->groups;
10690 int cpu;
10691
10692 /*
10693 * Ensure the balancing environment is consistent; can happen
10694 * when the softirq triggers 'during' hotplug.
10695 */
10696 if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
10697 return 0;
10698
10699 /*
10700 * In the newly idle case, we will allow all the CPUs
10701 * to do the newly idle load balance.
10702 *
10703 * However, we bail out if we already have tasks or a wakeup pending,
10704 * to optimize wakeup latency.
10705 */
10706 if (env->idle == CPU_NEWLY_IDLE) {
10707 if (env->dst_rq->nr_running > 0 || env->dst_rq->ttwu_pending)
10708 return 0;
10709 return 1;
10710 }
10711
10712 /* Try to find first idle CPU */
10713 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
10714 if (!idle_cpu(cpu))
10715 continue;
10716
10717 /* Are we the first idle CPU? */
10718 return cpu == env->dst_cpu;
10719 }
10720
10721 /* Are we the first CPU of this group ? */
10722 return group_balance_cpu(sg) == env->dst_cpu;
10723 }
10724
10725 /*
10726 * Check this_cpu to ensure it is balanced within domain. Attempt to move
10727 * tasks if there is an imbalance.
10728 */
10729 static int load_balance(int this_cpu, struct rq *this_rq,
10730 struct sched_domain *sd, enum cpu_idle_type idle,
10731 int *continue_balancing)
10732 {
10733 int ld_moved, cur_ld_moved, active_balance = 0;
10734 struct sched_domain *sd_parent = sd->parent;
10735 struct sched_group *group;
10736 struct rq *busiest;
10737 struct rq_flags rf;
10738 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
10739 struct lb_env env = {
10740 .sd = sd,
10741 .dst_cpu = this_cpu,
10742 .dst_rq = this_rq,
10743 .dst_grpmask = sched_group_span(sd->groups),
10744 .idle = idle,
10745 .loop_break = SCHED_NR_MIGRATE_BREAK,
10746 .cpus = cpus,
10747 .fbq_type = all,
10748 .tasks = LIST_HEAD_INIT(env.tasks),
10749 };
10750
10751 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
10752
10753 schedstat_inc(sd->lb_count[idle]);
10754
10755 redo:
10756 if (!should_we_balance(&env)) {
10757 *continue_balancing = 0;
10758 goto out_balanced;
10759 }
10760
10761 group = find_busiest_group(&env);
10762 if (!group) {
10763 schedstat_inc(sd->lb_nobusyg[idle]);
10764 goto out_balanced;
10765 }
10766
10767 busiest = find_busiest_queue(&env, group);
10768 if (!busiest) {
10769 schedstat_inc(sd->lb_nobusyq[idle]);
10770 goto out_balanced;
10771 }
10772
10773 WARN_ON_ONCE(busiest == env.dst_rq);
10774
10775 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
10776
10777 env.src_cpu = busiest->cpu;
10778 env.src_rq = busiest;
10779
10780 ld_moved = 0;
10781 /* Clear this flag as soon as we find a pullable task */
10782 env.flags |= LBF_ALL_PINNED;
10783 if (busiest->nr_running > 1) {
10784 /*
10785 * Attempt to move tasks. If find_busiest_group has found
10786 * an imbalance but busiest->nr_running <= 1, the group is
10787 * still unbalanced. ld_moved simply stays zero, so it is
10788 * correctly treated as an imbalance.
10789 */
10790 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
10791
10792 more_balance:
10793 rq_lock_irqsave(busiest, &rf);
10794 update_rq_clock(busiest);
10795
10796 /*
10797 * cur_ld_moved - load moved in current iteration
10798 * ld_moved - cumulative load moved across iterations
10799 */
10800 cur_ld_moved = detach_tasks(&env);
10801
10802 /*
10803 * We've detached some tasks from busiest_rq. Every
10804 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
10805 * unlock busiest->lock, and we are able to be sure
10806 * that nobody can manipulate the tasks in parallel.
10807 * See task_rq_lock() family for the details.
10808 */
10809
10810 rq_unlock(busiest, &rf);
10811
10812 if (cur_ld_moved) {
10813 attach_tasks(&env);
10814 ld_moved += cur_ld_moved;
10815 }
10816
10817 local_irq_restore(rf.flags);
10818
10819 if (env.flags & LBF_NEED_BREAK) {
10820 env.flags &= ~LBF_NEED_BREAK;
10821 /* Stop if we tried all running tasks */
10822 if (env.loop < busiest->nr_running)
10823 goto more_balance;
10824 }
10825
10826 /*
10827 * Revisit (affine) tasks on src_cpu that couldn't be moved to
10828 * us and move them to an alternate dst_cpu in our sched_group
10829 * where they can run. The upper limit on how many times we
10830 * iterate on same src_cpu is dependent on number of CPUs in our
10831 * sched_group.
10832 *
10833 * This changes load balance semantics a bit on who can move
10834 * load to a given_cpu. In addition to the given_cpu itself
10835 * (or a ilb_cpu acting on its behalf where given_cpu is
10836 * nohz-idle), we now have balance_cpu in a position to move
10837 * load to given_cpu. In rare situations, this may cause
10838 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
10839 * _independently_ and at _same_ time to move some load to
10840 * given_cpu) causing excess load to be moved to given_cpu.
10841 * This however should not happen so much in practice and
10842 * moreover subsequent load balance cycles should correct the
10843 * excess load moved.
10844 */
10845 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
10846
10847 /* Prevent to re-select dst_cpu via env's CPUs */
10848 __cpumask_clear_cpu(env.dst_cpu, env.cpus);
10849
10850 env.dst_rq = cpu_rq(env.new_dst_cpu);
10851 env.dst_cpu = env.new_dst_cpu;
10852 env.flags &= ~LBF_DST_PINNED;
10853 env.loop = 0;
10854 env.loop_break = SCHED_NR_MIGRATE_BREAK;
10855
10856 /*
10857 * Go back to "more_balance" rather than "redo" since we
10858 * need to continue with same src_cpu.
10859 */
10860 goto more_balance;
10861 }
10862
10863 /*
10864 * We failed to reach balance because of affinity.
10865 */
10866 if (sd_parent) {
10867 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
10868
10869 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
10870 *group_imbalance = 1;
10871 }
10872
10873 /* All tasks on this runqueue were pinned by CPU affinity */
10874 if (unlikely(env.flags & LBF_ALL_PINNED)) {
10875 __cpumask_clear_cpu(cpu_of(busiest), cpus);
10876 /*
10877 * Attempting to continue load balancing at the current
10878 * sched_domain level only makes sense if there are
10879 * active CPUs remaining as possible busiest CPUs to
10880 * pull load from which are not contained within the
10881 * destination group that is receiving any migrated
10882 * load.
10883 */
10884 if (!cpumask_subset(cpus, env.dst_grpmask)) {
10885 env.loop = 0;
10886 env.loop_break = SCHED_NR_MIGRATE_BREAK;
10887 goto redo;
10888 }
10889 goto out_all_pinned;
10890 }
10891 }
10892
10893 if (!ld_moved) {
10894 schedstat_inc(sd->lb_failed[idle]);
10895 /*
10896 * Increment the failure counter only on periodic balance.
10897 * We do not want newidle balance, which can be very
10898 * frequent, pollute the failure counter causing
10899 * excessive cache_hot migrations and active balances.
10900 */
10901 if (idle != CPU_NEWLY_IDLE)
10902 sd->nr_balance_failed++;
10903
10904 if (need_active_balance(&env)) {
10905 unsigned long flags;
10906
10907 raw_spin_rq_lock_irqsave(busiest, flags);
10908
10909 /*
10910 * Don't kick the active_load_balance_cpu_stop,
10911 * if the curr task on busiest CPU can't be
10912 * moved to this_cpu:
10913 */
10914 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
10915 raw_spin_rq_unlock_irqrestore(busiest, flags);
10916 goto out_one_pinned;
10917 }
10918
10919 /* Record that we found at least one task that could run on this_cpu */
10920 env.flags &= ~LBF_ALL_PINNED;
10921
10922 /*
10923 * ->active_balance synchronizes accesses to
10924 * ->active_balance_work. Once set, it's cleared
10925 * only after active load balance is finished.
10926 */
10927 if (!busiest->active_balance) {
10928 busiest->active_balance = 1;
10929 busiest->push_cpu = this_cpu;
10930 active_balance = 1;
10931 }
10932 raw_spin_rq_unlock_irqrestore(busiest, flags);
10933
10934 if (active_balance) {
10935 stop_one_cpu_nowait(cpu_of(busiest),
10936 active_load_balance_cpu_stop, busiest,
10937 &busiest->active_balance_work);
10938 }
10939 }
10940 } else {
10941 sd->nr_balance_failed = 0;
10942 }
10943
10944 if (likely(!active_balance) || need_active_balance(&env)) {
10945 /* We were unbalanced, so reset the balancing interval */
10946 sd->balance_interval = sd->min_interval;
10947 }
10948
10949 goto out;
10950
10951 out_balanced:
10952 /*
10953 * We reach balance although we may have faced some affinity
10954 * constraints. Clear the imbalance flag only if other tasks got
10955 * a chance to move and fix the imbalance.
10956 */
10957 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
10958 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
10959
10960 if (*group_imbalance)
10961 *group_imbalance = 0;
10962 }
10963
10964 out_all_pinned:
10965 /*
10966 * We reach balance because all tasks are pinned at this level so
10967 * we can't migrate them. Let the imbalance flag set so parent level
10968 * can try to migrate them.
10969 */
10970 schedstat_inc(sd->lb_balanced[idle]);
10971
10972 sd->nr_balance_failed = 0;
10973
10974 out_one_pinned:
10975 ld_moved = 0;
10976
10977 /*
10978 * newidle_balance() disregards balance intervals, so we could
10979 * repeatedly reach this code, which would lead to balance_interval
10980 * skyrocketing in a short amount of time. Skip the balance_interval
10981 * increase logic to avoid that.
10982 */
10983 if (env.idle == CPU_NEWLY_IDLE)
10984 goto out;
10985
10986 /* tune up the balancing interval */
10987 if ((env.flags & LBF_ALL_PINNED &&
10988 sd->balance_interval < MAX_PINNED_INTERVAL) ||
10989 sd->balance_interval < sd->max_interval)
10990 sd->balance_interval *= 2;
10991 out:
10992 return ld_moved;
10993 }
10994
10995 static inline unsigned long
10996 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
10997 {
10998 unsigned long interval = sd->balance_interval;
10999
11000 if (cpu_busy)
11001 interval *= sd->busy_factor;
11002
11003 /* scale ms to jiffies */
11004 interval = msecs_to_jiffies(interval);
11005
11006 /*
11007 * Reduce likelihood of busy balancing at higher domains racing with
11008 * balancing at lower domains by preventing their balancing periods
11009 * from being multiples of each other.
11010 */
11011 if (cpu_busy)
11012 interval -= 1;
11013
11014 interval = clamp(interval, 1UL, max_load_balance_interval);
11015
11016 return interval;
11017 }
11018
11019 static inline void
11020 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
11021 {
11022 unsigned long interval, next;
11023
11024 /* used by idle balance, so cpu_busy = 0 */
11025 interval = get_sd_balance_interval(sd, 0);
11026 next = sd->last_balance + interval;
11027
11028 if (time_after(*next_balance, next))
11029 *next_balance = next;
11030 }
11031
11032 /*
11033 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
11034 * running tasks off the busiest CPU onto idle CPUs. It requires at
11035 * least 1 task to be running on each physical CPU where possible, and
11036 * avoids physical / logical imbalances.
11037 */
11038 static int active_load_balance_cpu_stop(void *data)
11039 {
11040 struct rq *busiest_rq = data;
11041 int busiest_cpu = cpu_of(busiest_rq);
11042 int target_cpu = busiest_rq->push_cpu;
11043 struct rq *target_rq = cpu_rq(target_cpu);
11044 struct sched_domain *sd;
11045 struct task_struct *p = NULL;
11046 struct rq_flags rf;
11047
11048 rq_lock_irq(busiest_rq, &rf);
11049 /*
11050 * Between queueing the stop-work and running it is a hole in which
11051 * CPUs can become inactive. We should not move tasks from or to
11052 * inactive CPUs.
11053 */
11054 if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
11055 goto out_unlock;
11056
11057 /* Make sure the requested CPU hasn't gone down in the meantime: */
11058 if (unlikely(busiest_cpu != smp_processor_id() ||
11059 !busiest_rq->active_balance))
11060 goto out_unlock;
11061
11062 /* Is there any task to move? */
11063 if (busiest_rq->nr_running <= 1)
11064 goto out_unlock;
11065
11066 /*
11067 * This condition is "impossible", if it occurs
11068 * we need to fix it. Originally reported by
11069 * Bjorn Helgaas on a 128-CPU setup.
11070 */
11071 WARN_ON_ONCE(busiest_rq == target_rq);
11072
11073 /* Search for an sd spanning us and the target CPU. */
11074 rcu_read_lock();
11075 for_each_domain(target_cpu, sd) {
11076 if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
11077 break;
11078 }
11079
11080 if (likely(sd)) {
11081 struct lb_env env = {
11082 .sd = sd,
11083 .dst_cpu = target_cpu,
11084 .dst_rq = target_rq,
11085 .src_cpu = busiest_rq->cpu,
11086 .src_rq = busiest_rq,
11087 .idle = CPU_IDLE,
11088 .flags = LBF_ACTIVE_LB,
11089 };
11090
11091 schedstat_inc(sd->alb_count);
11092 update_rq_clock(busiest_rq);
11093
11094 p = detach_one_task(&env);
11095 if (p) {
11096 schedstat_inc(sd->alb_pushed);
11097 /* Active balancing done, reset the failure counter. */
11098 sd->nr_balance_failed = 0;
11099 } else {
11100 schedstat_inc(sd->alb_failed);
11101 }
11102 }
11103 rcu_read_unlock();
11104 out_unlock:
11105 busiest_rq->active_balance = 0;
11106 rq_unlock(busiest_rq, &rf);
11107
11108 if (p)
11109 attach_one_task(target_rq, p);
11110
11111 local_irq_enable();
11112
11113 return 0;
11114 }
11115
11116 static DEFINE_SPINLOCK(balancing);
11117
11118 /*
11119 * Scale the max load_balance interval with the number of CPUs in the system.
11120 * This trades load-balance latency on larger machines for less cross talk.
11121 */
11122 void update_max_interval(void)
11123 {
11124 max_load_balance_interval = HZ*num_online_cpus()/10;
11125 }
11126
11127 static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
11128 {
11129 if (cost > sd->max_newidle_lb_cost) {
11130 /*
11131 * Track max cost of a domain to make sure to not delay the
11132 * next wakeup on the CPU.
11133 */
11134 sd->max_newidle_lb_cost = cost;
11135 sd->last_decay_max_lb_cost = jiffies;
11136 } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) {
11137 /*
11138 * Decay the newidle max times by ~1% per second to ensure that
11139 * it is not outdated and the current max cost is actually
11140 * shorter.
11141 */
11142 sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256;
11143 sd->last_decay_max_lb_cost = jiffies;
11144
11145 return true;
11146 }
11147
11148 return false;
11149 }
11150
11151 /*
11152 * It checks each scheduling domain to see if it is due to be balanced,
11153 * and initiates a balancing operation if so.
11154 *
11155 * Balancing parameters are set up in init_sched_domains.
11156 */
11157 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
11158 {
11159 int continue_balancing = 1;
11160 int cpu = rq->cpu;
11161 int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
11162 unsigned long interval;
11163 struct sched_domain *sd;
11164 /* Earliest time when we have to do rebalance again */
11165 unsigned long next_balance = jiffies + 60*HZ;
11166 int update_next_balance = 0;
11167 int need_serialize, need_decay = 0;
11168 u64 max_cost = 0;
11169
11170 rcu_read_lock();
11171 for_each_domain(cpu, sd) {
11172 /*
11173 * Decay the newidle max times here because this is a regular
11174 * visit to all the domains.
11175 */
11176 need_decay = update_newidle_cost(sd, 0);
11177 max_cost += sd->max_newidle_lb_cost;
11178
11179 /*
11180 * Stop the load balance at this level. There is another
11181 * CPU in our sched group which is doing load balancing more
11182 * actively.
11183 */
11184 if (!continue_balancing) {
11185 if (need_decay)
11186 continue;
11187 break;
11188 }
11189
11190 interval = get_sd_balance_interval(sd, busy);
11191
11192 need_serialize = sd->flags & SD_SERIALIZE;
11193 if (need_serialize) {
11194 if (!spin_trylock(&balancing))
11195 goto out;
11196 }
11197
11198 if (time_after_eq(jiffies, sd->last_balance + interval)) {
11199 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
11200 /*
11201 * The LBF_DST_PINNED logic could have changed
11202 * env->dst_cpu, so we can't know our idle
11203 * state even if we migrated tasks. Update it.
11204 */
11205 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
11206 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
11207 }
11208 sd->last_balance = jiffies;
11209 interval = get_sd_balance_interval(sd, busy);
11210 }
11211 if (need_serialize)
11212 spin_unlock(&balancing);
11213 out:
11214 if (time_after(next_balance, sd->last_balance + interval)) {
11215 next_balance = sd->last_balance + interval;
11216 update_next_balance = 1;
11217 }
11218 }
11219 if (need_decay) {
11220 /*
11221 * Ensure the rq-wide value also decays but keep it at a
11222 * reasonable floor to avoid funnies with rq->avg_idle.
11223 */
11224 rq->max_idle_balance_cost =
11225 max((u64)sysctl_sched_migration_cost, max_cost);
11226 }
11227 rcu_read_unlock();
11228
11229 /*
11230 * next_balance will be updated only when there is a need.
11231 * When the cpu is attached to null domain for ex, it will not be
11232 * updated.
11233 */
11234 if (likely(update_next_balance))
11235 rq->next_balance = next_balance;
11236
11237 }
11238
11239 static inline int on_null_domain(struct rq *rq)
11240 {
11241 return unlikely(!rcu_dereference_sched(rq->sd));
11242 }
11243
11244 #ifdef CONFIG_NO_HZ_COMMON
11245 /*
11246 * idle load balancing details
11247 * - When one of the busy CPUs notice that there may be an idle rebalancing
11248 * needed, they will kick the idle load balancer, which then does idle
11249 * load balancing for all the idle CPUs.
11250 * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED not set
11251 * anywhere yet.
11252 */
11253
11254 static inline int find_new_ilb(void)
11255 {
11256 int ilb;
11257 const struct cpumask *hk_mask;
11258
11259 hk_mask = housekeeping_cpumask(HK_TYPE_MISC);
11260
11261 for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {
11262
11263 if (ilb == smp_processor_id())
11264 continue;
11265
11266 if (idle_cpu(ilb))
11267 return ilb;
11268 }
11269
11270 return nr_cpu_ids;
11271 }
11272
11273 /*
11274 * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
11275 * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one).
11276 */
11277 static void kick_ilb(unsigned int flags)
11278 {
11279 int ilb_cpu;
11280
11281 /*
11282 * Increase nohz.next_balance only when if full ilb is triggered but
11283 * not if we only update stats.
11284 */
11285 if (flags & NOHZ_BALANCE_KICK)
11286 nohz.next_balance = jiffies+1;
11287
11288 ilb_cpu = find_new_ilb();
11289
11290 if (ilb_cpu >= nr_cpu_ids)
11291 return;
11292
11293 /*
11294 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
11295 * the first flag owns it; cleared by nohz_csd_func().
11296 */
11297 flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
11298 if (flags & NOHZ_KICK_MASK)
11299 return;
11300
11301 /*
11302 * This way we generate an IPI on the target CPU which
11303 * is idle. And the softirq performing nohz idle load balance
11304 * will be run before returning from the IPI.
11305 */
11306 smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
11307 }
11308
11309 /*
11310 * Current decision point for kicking the idle load balancer in the presence
11311 * of idle CPUs in the system.
11312 */
11313 static void nohz_balancer_kick(struct rq *rq)
11314 {
11315 unsigned long now = jiffies;
11316 struct sched_domain_shared *sds;
11317 struct sched_domain *sd;
11318 int nr_busy, i, cpu = rq->cpu;
11319 unsigned int flags = 0;
11320
11321 if (unlikely(rq->idle_balance))
11322 return;
11323
11324 /*
11325 * We may be recently in ticked or tickless idle mode. At the first
11326 * busy tick after returning from idle, we will update the busy stats.
11327 */
11328 nohz_balance_exit_idle(rq);
11329
11330 /*
11331 * None are in tickless mode and hence no need for NOHZ idle load
11332 * balancing.
11333 */
11334 if (likely(!atomic_read(&nohz.nr_cpus)))
11335 return;
11336
11337 if (READ_ONCE(nohz.has_blocked) &&
11338 time_after(now, READ_ONCE(nohz.next_blocked)))
11339 flags = NOHZ_STATS_KICK;
11340
11341 if (time_before(now, nohz.next_balance))
11342 goto out;
11343
11344 if (rq->nr_running >= 2) {
11345 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
11346 goto out;
11347 }
11348
11349 rcu_read_lock();
11350
11351 sd = rcu_dereference(rq->sd);
11352 if (sd) {
11353 /*
11354 * If there's a CFS task and the current CPU has reduced
11355 * capacity; kick the ILB to see if there's a better CPU to run
11356 * on.
11357 */
11358 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
11359 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
11360 goto unlock;
11361 }
11362 }
11363
11364 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
11365 if (sd) {
11366 /*
11367 * When ASYM_PACKING; see if there's a more preferred CPU
11368 * currently idle; in which case, kick the ILB to move tasks
11369 * around.
11370 */
11371 for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
11372 if (sched_asym_prefer(i, cpu)) {
11373 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
11374 goto unlock;
11375 }
11376 }
11377 }
11378
11379 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
11380 if (sd) {
11381 /*
11382 * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
11383 * to run the misfit task on.
11384 */
11385 if (check_misfit_status(rq, sd)) {
11386 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
11387 goto unlock;
11388 }
11389
11390 /*
11391 * For asymmetric systems, we do not want to nicely balance
11392 * cache use, instead we want to embrace asymmetry and only
11393 * ensure tasks have enough CPU capacity.
11394 *
11395 * Skip the LLC logic because it's not relevant in that case.
11396 */
11397 goto unlock;
11398 }
11399
11400 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
11401 if (sds) {
11402 /*
11403 * If there is an imbalance between LLC domains (IOW we could
11404 * increase the overall cache use), we need some less-loaded LLC
11405 * domain to pull some load. Likewise, we may need to spread
11406 * load within the current LLC domain (e.g. packed SMT cores but
11407 * other CPUs are idle). We can't really know from here how busy
11408 * the others are - so just get a nohz balance going if it looks
11409 * like this LLC domain has tasks we could move.
11410 */
11411 nr_busy = atomic_read(&sds->nr_busy_cpus);
11412 if (nr_busy > 1) {
11413 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
11414 goto unlock;
11415 }
11416 }
11417 unlock:
11418 rcu_read_unlock();
11419 out:
11420 if (READ_ONCE(nohz.needs_update))
11421 flags |= NOHZ_NEXT_KICK;
11422
11423 if (flags)
11424 kick_ilb(flags);
11425 }
11426
11427 static void set_cpu_sd_state_busy(int cpu)
11428 {
11429 struct sched_domain *sd;
11430
11431 rcu_read_lock();
11432 sd = rcu_dereference(per_cpu(sd_llc, cpu));
11433
11434 if (!sd || !sd->nohz_idle)
11435 goto unlock;
11436 sd->nohz_idle = 0;
11437
11438 atomic_inc(&sd->shared->nr_busy_cpus);
11439 unlock:
11440 rcu_read_unlock();
11441 }
11442
11443 void nohz_balance_exit_idle(struct rq *rq)
11444 {
11445 SCHED_WARN_ON(rq != this_rq());
11446
11447 if (likely(!rq->nohz_tick_stopped))
11448 return;
11449
11450 rq->nohz_tick_stopped = 0;
11451 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
11452 atomic_dec(&nohz.nr_cpus);
11453
11454 set_cpu_sd_state_busy(rq->cpu);
11455 }
11456
11457 static void set_cpu_sd_state_idle(int cpu)
11458 {
11459 struct sched_domain *sd;
11460
11461 rcu_read_lock();
11462 sd = rcu_dereference(per_cpu(sd_llc, cpu));
11463
11464 if (!sd || sd->nohz_idle)
11465 goto unlock;
11466 sd->nohz_idle = 1;
11467
11468 atomic_dec(&sd->shared->nr_busy_cpus);
11469 unlock:
11470 rcu_read_unlock();
11471 }
11472
11473 /*
11474 * This routine will record that the CPU is going idle with tick stopped.
11475 * This info will be used in performing idle load balancing in the future.
11476 */
11477 void nohz_balance_enter_idle(int cpu)
11478 {
11479 struct rq *rq = cpu_rq(cpu);
11480
11481 SCHED_WARN_ON(cpu != smp_processor_id());
11482
11483 /* If this CPU is going down, then nothing needs to be done: */
11484 if (!cpu_active(cpu))
11485 return;
11486
11487 /* Spare idle load balancing on CPUs that don't want to be disturbed: */
11488 if (!housekeeping_cpu(cpu, HK_TYPE_SCHED))
11489 return;
11490
11491 /*
11492 * Can be set safely without rq->lock held
11493 * If a clear happens, it will have evaluated last additions because
11494 * rq->lock is held during the check and the clear
11495 */
11496 rq->has_blocked_load = 1;
11497
11498 /*
11499 * The tick is still stopped but load could have been added in the
11500 * meantime. We set the nohz.has_blocked flag to trig a check of the
11501 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
11502 * of nohz.has_blocked can only happen after checking the new load
11503 */
11504 if (rq->nohz_tick_stopped)
11505 goto out;
11506
11507 /* If we're a completely isolated CPU, we don't play: */
11508 if (on_null_domain(rq))
11509 return;
11510
11511 rq->nohz_tick_stopped = 1;
11512
11513 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
11514 atomic_inc(&nohz.nr_cpus);
11515
11516 /*
11517 * Ensures that if nohz_idle_balance() fails to observe our
11518 * @idle_cpus_mask store, it must observe the @has_blocked
11519 * and @needs_update stores.
11520 */
11521 smp_mb__after_atomic();
11522
11523 set_cpu_sd_state_idle(cpu);
11524
11525 WRITE_ONCE(nohz.needs_update, 1);
11526 out:
11527 /*
11528 * Each time a cpu enter idle, we assume that it has blocked load and
11529 * enable the periodic update of the load of idle cpus
11530 */
11531 WRITE_ONCE(nohz.has_blocked, 1);
11532 }
11533
11534 static bool update_nohz_stats(struct rq *rq)
11535 {
11536 unsigned int cpu = rq->cpu;
11537
11538 if (!rq->has_blocked_load)
11539 return false;
11540
11541 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
11542 return false;
11543
11544 if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
11545 return true;
11546
11547 update_blocked_averages(cpu);
11548
11549 return rq->has_blocked_load;
11550 }
11551
11552 /*
11553 * Internal function that runs load balance for all idle cpus. The load balance
11554 * can be a simple update of blocked load or a complete load balance with
11555 * tasks movement depending of flags.
11556 */
11557 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
11558 {
11559 /* Earliest time when we have to do rebalance again */
11560 unsigned long now = jiffies;
11561 unsigned long next_balance = now + 60*HZ;
11562 bool has_blocked_load = false;
11563 int update_next_balance = 0;
11564 int this_cpu = this_rq->cpu;
11565 int balance_cpu;
11566 struct rq *rq;
11567
11568 SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
11569
11570 /*
11571 * We assume there will be no idle load after this update and clear
11572 * the has_blocked flag. If a cpu enters idle in the mean time, it will
11573 * set the has_blocked flag and trigger another update of idle load.
11574 * Because a cpu that becomes idle, is added to idle_cpus_mask before
11575 * setting the flag, we are sure to not clear the state and not
11576 * check the load of an idle cpu.
11577 *
11578 * Same applies to idle_cpus_mask vs needs_update.
11579 */
11580 if (flags & NOHZ_STATS_KICK)
11581 WRITE_ONCE(nohz.has_blocked, 0);
11582 if (flags & NOHZ_NEXT_KICK)
11583 WRITE_ONCE(nohz.needs_update, 0);
11584
11585 /*
11586 * Ensures that if we miss the CPU, we must see the has_blocked
11587 * store from nohz_balance_enter_idle().
11588 */
11589 smp_mb();
11590
11591 /*
11592 * Start with the next CPU after this_cpu so we will end with this_cpu and let a
11593 * chance for other idle cpu to pull load.
11594 */
11595 for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
11596 if (!idle_cpu(balance_cpu))
11597 continue;
11598
11599 /*
11600 * If this CPU gets work to do, stop the load balancing
11601 * work being done for other CPUs. Next load
11602 * balancing owner will pick it up.
11603 */
11604 if (need_resched()) {
11605 if (flags & NOHZ_STATS_KICK)
11606 has_blocked_load = true;
11607 if (flags & NOHZ_NEXT_KICK)
11608 WRITE_ONCE(nohz.needs_update, 1);
11609 goto abort;
11610 }
11611
11612 rq = cpu_rq(balance_cpu);
11613
11614 if (flags & NOHZ_STATS_KICK)
11615 has_blocked_load |= update_nohz_stats(rq);
11616
11617 /*
11618 * If time for next balance is due,
11619 * do the balance.
11620 */
11621 if (time_after_eq(jiffies, rq->next_balance)) {
11622 struct rq_flags rf;
11623
11624 rq_lock_irqsave(rq, &rf);
11625 update_rq_clock(rq);
11626 rq_unlock_irqrestore(rq, &rf);
11627
11628 if (flags & NOHZ_BALANCE_KICK)
11629 rebalance_domains(rq, CPU_IDLE);
11630 }
11631
11632 if (time_after(next_balance, rq->next_balance)) {
11633 next_balance = rq->next_balance;
11634 update_next_balance = 1;
11635 }
11636 }
11637
11638 /*
11639 * next_balance will be updated only when there is a need.
11640 * When the CPU is attached to null domain for ex, it will not be
11641 * updated.
11642 */
11643 if (likely(update_next_balance))
11644 nohz.next_balance = next_balance;
11645
11646 if (flags & NOHZ_STATS_KICK)
11647 WRITE_ONCE(nohz.next_blocked,
11648 now + msecs_to_jiffies(LOAD_AVG_PERIOD));
11649
11650 abort:
11651 /* There is still blocked load, enable periodic update */
11652 if (has_blocked_load)
11653 WRITE_ONCE(nohz.has_blocked, 1);
11654 }
11655
11656 /*
11657 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
11658 * rebalancing for all the cpus for whom scheduler ticks are stopped.
11659 */
11660 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
11661 {
11662 unsigned int flags = this_rq->nohz_idle_balance;
11663
11664 if (!flags)
11665 return false;
11666
11667 this_rq->nohz_idle_balance = 0;
11668
11669 if (idle != CPU_IDLE)
11670 return false;
11671
11672 _nohz_idle_balance(this_rq, flags);
11673
11674 return true;
11675 }
11676
11677 /*
11678 * Check if we need to run the ILB for updating blocked load before entering
11679 * idle state.
11680 */
11681 void nohz_run_idle_balance(int cpu)
11682 {
11683 unsigned int flags;
11684
11685 flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
11686
11687 /*
11688 * Update the blocked load only if no SCHED_SOFTIRQ is about to happen
11689 * (ie NOHZ_STATS_KICK set) and will do the same.
11690 */
11691 if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
11692 _nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK);
11693 }
11694
11695 static void nohz_newidle_balance(struct rq *this_rq)
11696 {
11697 int this_cpu = this_rq->cpu;
11698
11699 /*
11700 * This CPU doesn't want to be disturbed by scheduler
11701 * housekeeping
11702 */
11703 if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED))
11704 return;
11705
11706 /* Will wake up very soon. No time for doing anything else*/
11707 if (this_rq->avg_idle < sysctl_sched_migration_cost)
11708 return;
11709
11710 /* Don't need to update blocked load of idle CPUs*/
11711 if (!READ_ONCE(nohz.has_blocked) ||
11712 time_before(jiffies, READ_ONCE(nohz.next_blocked)))
11713 return;
11714
11715 /*
11716 * Set the need to trigger ILB in order to update blocked load
11717 * before entering idle state.
11718 */
11719 atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
11720 }
11721
11722 #else /* !CONFIG_NO_HZ_COMMON */
11723 static inline void nohz_balancer_kick(struct rq *rq) { }
11724
11725 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
11726 {
11727 return false;
11728 }
11729
11730 static inline void nohz_newidle_balance(struct rq *this_rq) { }
11731 #endif /* CONFIG_NO_HZ_COMMON */
11732
11733 /*
11734 * newidle_balance is called by schedule() if this_cpu is about to become
11735 * idle. Attempts to pull tasks from other CPUs.
11736 *
11737 * Returns:
11738 * < 0 - we released the lock and there are !fair tasks present
11739 * 0 - failed, no new tasks
11740 * > 0 - success, new (fair) tasks present
11741 */
11742 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
11743 {
11744 unsigned long next_balance = jiffies + HZ;
11745 int this_cpu = this_rq->cpu;
11746 u64 t0, t1, curr_cost = 0;
11747 struct sched_domain *sd;
11748 int pulled_task = 0;
11749
11750 update_misfit_status(NULL, this_rq);
11751
11752 /*
11753 * There is a task waiting to run. No need to search for one.
11754 * Return 0; the task will be enqueued when switching to idle.
11755 */
11756 if (this_rq->ttwu_pending)
11757 return 0;
11758
11759 /*
11760 * We must set idle_stamp _before_ calling idle_balance(), such that we
11761 * measure the duration of idle_balance() as idle time.
11762 */
11763 this_rq->idle_stamp = rq_clock(this_rq);
11764
11765 /*
11766 * Do not pull tasks towards !active CPUs...
11767 */
11768 if (!cpu_active(this_cpu))
11769 return 0;
11770
11771 /*
11772 * This is OK, because current is on_cpu, which avoids it being picked
11773 * for load-balance and preemption/IRQs are still disabled avoiding
11774 * further scheduler activity on it and we're being very careful to
11775 * re-start the picking loop.
11776 */
11777 rq_unpin_lock(this_rq, rf);
11778
11779 rcu_read_lock();
11780 sd = rcu_dereference_check_sched_domain(this_rq->sd);
11781
11782 if (!READ_ONCE(this_rq->rd->overload) ||
11783 (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) {
11784
11785 if (sd)
11786 update_next_balance(sd, &next_balance);
11787 rcu_read_unlock();
11788
11789 goto out;
11790 }
11791 rcu_read_unlock();
11792
11793 raw_spin_rq_unlock(this_rq);
11794
11795 t0 = sched_clock_cpu(this_cpu);
11796 update_blocked_averages(this_cpu);
11797
11798 rcu_read_lock();
11799 for_each_domain(this_cpu, sd) {
11800 int continue_balancing = 1;
11801 u64 domain_cost;
11802
11803 update_next_balance(sd, &next_balance);
11804
11805 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
11806 break;
11807
11808 if (sd->flags & SD_BALANCE_NEWIDLE) {
11809
11810 pulled_task = load_balance(this_cpu, this_rq,
11811 sd, CPU_NEWLY_IDLE,
11812 &continue_balancing);
11813
11814 t1 = sched_clock_cpu(this_cpu);
11815 domain_cost = t1 - t0;
11816 update_newidle_cost(sd, domain_cost);
11817
11818 curr_cost += domain_cost;
11819 t0 = t1;
11820 }
11821
11822 /*
11823 * Stop searching for tasks to pull if there are
11824 * now runnable tasks on this rq.
11825 */
11826 if (pulled_task || this_rq->nr_running > 0 ||
11827 this_rq->ttwu_pending)
11828 break;
11829 }
11830 rcu_read_unlock();
11831
11832 raw_spin_rq_lock(this_rq);
11833
11834 if (curr_cost > this_rq->max_idle_balance_cost)
11835 this_rq->max_idle_balance_cost = curr_cost;
11836
11837 /*
11838 * While browsing the domains, we released the rq lock, a task could
11839 * have been enqueued in the meantime. Since we're not going idle,
11840 * pretend we pulled a task.
11841 */
11842 if (this_rq->cfs.h_nr_running && !pulled_task)
11843 pulled_task = 1;
11844
11845 /* Is there a task of a high priority class? */
11846 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
11847 pulled_task = -1;
11848
11849 out:
11850 /* Move the next balance forward */
11851 if (time_after(this_rq->next_balance, next_balance))
11852 this_rq->next_balance = next_balance;
11853
11854 if (pulled_task)
11855 this_rq->idle_stamp = 0;
11856 else
11857 nohz_newidle_balance(this_rq);
11858
11859 rq_repin_lock(this_rq, rf);
11860
11861 return pulled_task;
11862 }
11863
11864 /*
11865 * run_rebalance_domains is triggered when needed from the scheduler tick.
11866 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
11867 */
11868 static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
11869 {
11870 struct rq *this_rq = this_rq();
11871 enum cpu_idle_type idle = this_rq->idle_balance ?
11872 CPU_IDLE : CPU_NOT_IDLE;
11873
11874 /*
11875 * If this CPU has a pending nohz_balance_kick, then do the
11876 * balancing on behalf of the other idle CPUs whose ticks are
11877 * stopped. Do nohz_idle_balance *before* rebalance_domains to
11878 * give the idle CPUs a chance to load balance. Else we may
11879 * load balance only within the local sched_domain hierarchy
11880 * and abort nohz_idle_balance altogether if we pull some load.
11881 */
11882 if (nohz_idle_balance(this_rq, idle))
11883 return;
11884
11885 /* normal load balance */
11886 update_blocked_averages(this_rq->cpu);
11887 rebalance_domains(this_rq, idle);
11888 }
11889
11890 /*
11891 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
11892 */
11893 void trigger_load_balance(struct rq *rq)
11894 {
11895 /*
11896 * Don't need to rebalance while attached to NULL domain or
11897 * runqueue CPU is not active
11898 */
11899 if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
11900 return;
11901
11902 if (time_after_eq(jiffies, rq->next_balance))
11903 raise_softirq(SCHED_SOFTIRQ);
11904
11905 nohz_balancer_kick(rq);
11906 }
11907
11908 static void rq_online_fair(struct rq *rq)
11909 {
11910 update_sysctl();
11911
11912 update_runtime_enabled(rq);
11913 }
11914
11915 static void rq_offline_fair(struct rq *rq)
11916 {
11917 update_sysctl();
11918
11919 /* Ensure any throttled groups are reachable by pick_next_task */
11920 unthrottle_offline_cfs_rqs(rq);
11921 }
11922
11923 #endif /* CONFIG_SMP */
11924
11925 #ifdef CONFIG_SCHED_CORE
11926 static inline bool
11927 __entity_slice_used(struct sched_entity *se, int min_nr_tasks)
11928 {
11929 u64 slice = sched_slice(cfs_rq_of(se), se);
11930 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
11931
11932 return (rtime * min_nr_tasks > slice);
11933 }
11934
11935 #define MIN_NR_TASKS_DURING_FORCEIDLE 2
11936 static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
11937 {
11938 if (!sched_core_enabled(rq))
11939 return;
11940
11941 /*
11942 * If runqueue has only one task which used up its slice and
11943 * if the sibling is forced idle, then trigger schedule to
11944 * give forced idle task a chance.
11945 *
11946 * sched_slice() considers only this active rq and it gets the
11947 * whole slice. But during force idle, we have siblings acting
11948 * like a single runqueue and hence we need to consider runnable
11949 * tasks on this CPU and the forced idle CPU. Ideally, we should
11950 * go through the forced idle rq, but that would be a perf hit.
11951 * We can assume that the forced idle CPU has at least
11952 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
11953 * if we need to give up the CPU.
11954 */
11955 if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
11956 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
11957 resched_curr(rq);
11958 }
11959
11960 /*
11961 * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
11962 */
11963 static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
11964 bool forceidle)
11965 {
11966 for_each_sched_entity(se) {
11967 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11968
11969 if (forceidle) {
11970 if (cfs_rq->forceidle_seq == fi_seq)
11971 break;
11972 cfs_rq->forceidle_seq = fi_seq;
11973 }
11974
11975 cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
11976 }
11977 }
11978
11979 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
11980 {
11981 struct sched_entity *se = &p->se;
11982
11983 if (p->sched_class != &fair_sched_class)
11984 return;
11985
11986 se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
11987 }
11988
11989 bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
11990 bool in_fi)
11991 {
11992 struct rq *rq = task_rq(a);
11993 const struct sched_entity *sea = &a->se;
11994 const struct sched_entity *seb = &b->se;
11995 struct cfs_rq *cfs_rqa;
11996 struct cfs_rq *cfs_rqb;
11997 s64 delta;
11998
11999 SCHED_WARN_ON(task_rq(b)->core != rq->core);
12000
12001 #ifdef CONFIG_FAIR_GROUP_SCHED
12002 /*
12003 * Find an se in the hierarchy for tasks a and b, such that the se's
12004 * are immediate siblings.
12005 */
12006 while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
12007 int sea_depth = sea->depth;
12008 int seb_depth = seb->depth;
12009
12010 if (sea_depth >= seb_depth)
12011 sea = parent_entity(sea);
12012 if (sea_depth <= seb_depth)
12013 seb = parent_entity(seb);
12014 }
12015
12016 se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
12017 se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
12018
12019 cfs_rqa = sea->cfs_rq;
12020 cfs_rqb = seb->cfs_rq;
12021 #else
12022 cfs_rqa = &task_rq(a)->cfs;
12023 cfs_rqb = &task_rq(b)->cfs;
12024 #endif
12025
12026 /*
12027 * Find delta after normalizing se's vruntime with its cfs_rq's
12028 * min_vruntime_fi, which would have been updated in prior calls
12029 * to se_fi_update().
12030 */
12031 delta = (s64)(sea->vruntime - seb->vruntime) +
12032 (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);
12033
12034 return delta > 0;
12035 }
12036 #else
12037 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
12038 #endif
12039
12040 /*
12041 * scheduler tick hitting a task of our scheduling class.
12042 *
12043 * NOTE: This function can be called remotely by the tick offload that
12044 * goes along full dynticks. Therefore no local assumption can be made
12045 * and everything must be accessed through the @rq and @curr passed in
12046 * parameters.
12047 */
12048 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
12049 {
12050 struct cfs_rq *cfs_rq;
12051 struct sched_entity *se = &curr->se;
12052
12053 for_each_sched_entity(se) {
12054 cfs_rq = cfs_rq_of(se);
12055 entity_tick(cfs_rq, se, queued);
12056 }
12057
12058 if (static_branch_unlikely(&sched_numa_balancing))
12059 task_tick_numa(rq, curr);
12060
12061 update_misfit_status(curr, rq);
12062 update_overutilized_status(task_rq(curr));
12063
12064 task_tick_core(rq, curr);
12065 }
12066
12067 /*
12068 * called on fork with the child task as argument from the parent's context
12069 * - child not yet on the tasklist
12070 * - preemption disabled
12071 */
12072 static void task_fork_fair(struct task_struct *p)
12073 {
12074 struct cfs_rq *cfs_rq;
12075 struct sched_entity *se = &p->se, *curr;
12076 struct rq *rq = this_rq();
12077 struct rq_flags rf;
12078
12079 rq_lock(rq, &rf);
12080 update_rq_clock(rq);
12081
12082 cfs_rq = task_cfs_rq(current);
12083 curr = cfs_rq->curr;
12084 if (curr) {
12085 update_curr(cfs_rq);
12086 se->vruntime = curr->vruntime;
12087 }
12088 place_entity(cfs_rq, se, 1);
12089
12090 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
12091 /*
12092 * Upon rescheduling, sched_class::put_prev_task() will place
12093 * 'current' within the tree based on its new key value.
12094 */
12095 swap(curr->vruntime, se->vruntime);
12096 resched_curr(rq);
12097 }
12098
12099 se->vruntime -= cfs_rq->min_vruntime;
12100 rq_unlock(rq, &rf);
12101 }
12102
12103 /*
12104 * Priority of the task has changed. Check to see if we preempt
12105 * the current task.
12106 */
12107 static void
12108 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
12109 {
12110 if (!task_on_rq_queued(p))
12111 return;
12112
12113 if (rq->cfs.nr_running == 1)
12114 return;
12115
12116 /*
12117 * Reschedule if we are currently running on this runqueue and
12118 * our priority decreased, or if we are not currently running on
12119 * this runqueue and our priority is higher than the current's
12120 */
12121 if (task_current(rq, p)) {
12122 if (p->prio > oldprio)
12123 resched_curr(rq);
12124 } else
12125 check_preempt_curr(rq, p, 0);
12126 }
12127
12128 static inline bool vruntime_normalized(struct task_struct *p)
12129 {
12130 struct sched_entity *se = &p->se;
12131
12132 /*
12133 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
12134 * the dequeue_entity(.flags=0) will already have normalized the
12135 * vruntime.
12136 */
12137 if (p->on_rq)
12138 return true;
12139
12140 /*
12141 * When !on_rq, vruntime of the task has usually NOT been normalized.
12142 * But there are some cases where it has already been normalized:
12143 *
12144 * - A forked child which is waiting for being woken up by
12145 * wake_up_new_task().
12146 * - A task which has been woken up by try_to_wake_up() and
12147 * waiting for actually being woken up by sched_ttwu_pending().
12148 */
12149 if (!se->sum_exec_runtime ||
12150 (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup))
12151 return true;
12152
12153 return false;
12154 }
12155
12156 #ifdef CONFIG_FAIR_GROUP_SCHED
12157 /*
12158 * Propagate the changes of the sched_entity across the tg tree to make it
12159 * visible to the root
12160 */
12161 static void propagate_entity_cfs_rq(struct sched_entity *se)
12162 {
12163 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12164
12165 if (cfs_rq_throttled(cfs_rq))
12166 return;
12167
12168 if (!throttled_hierarchy(cfs_rq))
12169 list_add_leaf_cfs_rq(cfs_rq);
12170
12171 /* Start to propagate at parent */
12172 se = se->parent;
12173
12174 for_each_sched_entity(se) {
12175 cfs_rq = cfs_rq_of(se);
12176
12177 update_load_avg(cfs_rq, se, UPDATE_TG);
12178
12179 if (cfs_rq_throttled(cfs_rq))
12180 break;
12181
12182 if (!throttled_hierarchy(cfs_rq))
12183 list_add_leaf_cfs_rq(cfs_rq);
12184 }
12185 }
12186 #else
12187 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
12188 #endif
12189
12190 static void detach_entity_cfs_rq(struct sched_entity *se)
12191 {
12192 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12193
12194 #ifdef CONFIG_SMP
12195 /*
12196 * In case the task sched_avg hasn't been attached:
12197 * - A forked task which hasn't been woken up by wake_up_new_task().
12198 * - A task which has been woken up by try_to_wake_up() but is
12199 * waiting for actually being woken up by sched_ttwu_pending().
12200 */
12201 if (!se->avg.last_update_time)
12202 return;
12203 #endif
12204
12205 /* Catch up with the cfs_rq and remove our load when we leave */
12206 update_load_avg(cfs_rq, se, 0);
12207 detach_entity_load_avg(cfs_rq, se);
12208 update_tg_load_avg(cfs_rq);
12209 propagate_entity_cfs_rq(se);
12210 }
12211
12212 static void attach_entity_cfs_rq(struct sched_entity *se)
12213 {
12214 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12215
12216 /* Synchronize entity with its cfs_rq */
12217 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
12218 attach_entity_load_avg(cfs_rq, se);
12219 update_tg_load_avg(cfs_rq);
12220 propagate_entity_cfs_rq(se);
12221 }
12222
12223 static void detach_task_cfs_rq(struct task_struct *p)
12224 {
12225 struct sched_entity *se = &p->se;
12226 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12227
12228 if (!vruntime_normalized(p)) {
12229 /*
12230 * Fix up our vruntime so that the current sleep doesn't
12231 * cause 'unlimited' sleep bonus.
12232 */
12233 place_entity(cfs_rq, se, 0);
12234 se->vruntime -= cfs_rq->min_vruntime;
12235 }
12236
12237 detach_entity_cfs_rq(se);
12238 }
12239
12240 static void attach_task_cfs_rq(struct task_struct *p)
12241 {
12242 struct sched_entity *se = &p->se;
12243 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12244
12245 attach_entity_cfs_rq(se);
12246
12247 if (!vruntime_normalized(p))
12248 se->vruntime += cfs_rq->min_vruntime;
12249 }
12250
12251 static void switched_from_fair(struct rq *rq, struct task_struct *p)
12252 {
12253 detach_task_cfs_rq(p);
12254 }
12255
12256 static void switched_to_fair(struct rq *rq, struct task_struct *p)
12257 {
12258 attach_task_cfs_rq(p);
12259
12260 if (task_on_rq_queued(p)) {
12261 /*
12262 * We were most likely switched from sched_rt, so
12263 * kick off the schedule if running, otherwise just see
12264 * if we can still preempt the current task.
12265 */
12266 if (task_current(rq, p))
12267 resched_curr(rq);
12268 else
12269 check_preempt_curr(rq, p, 0);
12270 }
12271 }
12272
12273 /* Account for a task changing its policy or group.
12274 *
12275 * This routine is mostly called to set cfs_rq->curr field when a task
12276 * migrates between groups/classes.
12277 */
12278 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
12279 {
12280 struct sched_entity *se = &p->se;
12281
12282 #ifdef CONFIG_SMP
12283 if (task_on_rq_queued(p)) {
12284 /*
12285 * Move the next running task to the front of the list, so our
12286 * cfs_tasks list becomes MRU one.
12287 */
12288 list_move(&se->group_node, &rq->cfs_tasks);
12289 }
12290 #endif
12291
12292 for_each_sched_entity(se) {
12293 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12294
12295 set_next_entity(cfs_rq, se);
12296 /* ensure bandwidth has been allocated on our new cfs_rq */
12297 account_cfs_rq_runtime(cfs_rq, 0);
12298 }
12299 }
12300
12301 void init_cfs_rq(struct cfs_rq *cfs_rq)
12302 {
12303 cfs_rq->tasks_timeline = RB_ROOT_CACHED;
12304 u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
12305 #ifdef CONFIG_SMP
12306 raw_spin_lock_init(&cfs_rq->removed.lock);
12307 #endif
12308 }
12309
12310 #ifdef CONFIG_FAIR_GROUP_SCHED
12311 static void task_change_group_fair(struct task_struct *p)
12312 {
12313 /*
12314 * We couldn't detach or attach a forked task which
12315 * hasn't been woken up by wake_up_new_task().
12316 */
12317 if (READ_ONCE(p->__state) == TASK_NEW)
12318 return;
12319
12320 detach_task_cfs_rq(p);
12321
12322 #ifdef CONFIG_SMP
12323 /* Tell se's cfs_rq has been changed -- migrated */
12324 p->se.avg.last_update_time = 0;
12325 #endif
12326 set_task_rq(p, task_cpu(p));
12327 attach_task_cfs_rq(p);
12328 }
12329
12330 void free_fair_sched_group(struct task_group *tg)
12331 {
12332 int i;
12333
12334 for_each_possible_cpu(i) {
12335 if (tg->cfs_rq)
12336 kfree(tg->cfs_rq[i]);
12337 if (tg->se)
12338 kfree(tg->se[i]);
12339 }
12340
12341 kfree(tg->cfs_rq);
12342 kfree(tg->se);
12343 }
12344
12345 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
12346 {
12347 struct sched_entity *se;
12348 struct cfs_rq *cfs_rq;
12349 int i;
12350
12351 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
12352 if (!tg->cfs_rq)
12353 goto err;
12354 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
12355 if (!tg->se)
12356 goto err;
12357
12358 tg->shares = NICE_0_LOAD;
12359
12360 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
12361
12362 for_each_possible_cpu(i) {
12363 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
12364 GFP_KERNEL, cpu_to_node(i));
12365 if (!cfs_rq)
12366 goto err;
12367
12368 se = kzalloc_node(sizeof(struct sched_entity_stats),
12369 GFP_KERNEL, cpu_to_node(i));
12370 if (!se)
12371 goto err_free_rq;
12372
12373 init_cfs_rq(cfs_rq);
12374 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
12375 init_entity_runnable_average(se);
12376 }
12377
12378 return 1;
12379
12380 err_free_rq:
12381 kfree(cfs_rq);
12382 err:
12383 return 0;
12384 }
12385
12386 void online_fair_sched_group(struct task_group *tg)
12387 {
12388 struct sched_entity *se;
12389 struct rq_flags rf;
12390 struct rq *rq;
12391 int i;
12392
12393 for_each_possible_cpu(i) {
12394 rq = cpu_rq(i);
12395 se = tg->se[i];
12396 rq_lock_irq(rq, &rf);
12397 update_rq_clock(rq);
12398 attach_entity_cfs_rq(se);
12399 sync_throttle(tg, i);
12400 rq_unlock_irq(rq, &rf);
12401 }
12402 }
12403
12404 void unregister_fair_sched_group(struct task_group *tg)
12405 {
12406 unsigned long flags;
12407 struct rq *rq;
12408 int cpu;
12409
12410 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
12411
12412 for_each_possible_cpu(cpu) {
12413 if (tg->se[cpu])
12414 remove_entity_load_avg(tg->se[cpu]);
12415
12416 /*
12417 * Only empty task groups can be destroyed; so we can speculatively
12418 * check on_list without danger of it being re-added.
12419 */
12420 if (!tg->cfs_rq[cpu]->on_list)
12421 continue;
12422
12423 rq = cpu_rq(cpu);
12424
12425 raw_spin_rq_lock_irqsave(rq, flags);
12426 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
12427 raw_spin_rq_unlock_irqrestore(rq, flags);
12428 }
12429 }
12430
12431 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
12432 struct sched_entity *se, int cpu,
12433 struct sched_entity *parent)
12434 {
12435 struct rq *rq = cpu_rq(cpu);
12436
12437 cfs_rq->tg = tg;
12438 cfs_rq->rq = rq;
12439 init_cfs_rq_runtime(cfs_rq);
12440
12441 tg->cfs_rq[cpu] = cfs_rq;
12442 tg->se[cpu] = se;
12443
12444 /* se could be NULL for root_task_group */
12445 if (!se)
12446 return;
12447
12448 if (!parent) {
12449 se->cfs_rq = &rq->cfs;
12450 se->depth = 0;
12451 } else {
12452 se->cfs_rq = parent->my_q;
12453 se->depth = parent->depth + 1;
12454 }
12455
12456 se->my_q = cfs_rq;
12457 /* guarantee group entities always have weight */
12458 update_load_set(&se->load, NICE_0_LOAD);
12459 se->parent = parent;
12460 }
12461
12462 static DEFINE_MUTEX(shares_mutex);
12463
12464 static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
12465 {
12466 int i;
12467
12468 lockdep_assert_held(&shares_mutex);
12469
12470 /*
12471 * We can't change the weight of the root cgroup.
12472 */
12473 if (!tg->se[0])
12474 return -EINVAL;
12475
12476 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
12477
12478 if (tg->shares == shares)
12479 return 0;
12480
12481 tg->shares = shares;
12482 for_each_possible_cpu(i) {
12483 struct rq *rq = cpu_rq(i);
12484 struct sched_entity *se = tg->se[i];
12485 struct rq_flags rf;
12486
12487 /* Propagate contribution to hierarchy */
12488 rq_lock_irqsave(rq, &rf);
12489 update_rq_clock(rq);
12490 for_each_sched_entity(se) {
12491 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
12492 update_cfs_group(se);
12493 }
12494 rq_unlock_irqrestore(rq, &rf);
12495 }
12496
12497 return 0;
12498 }
12499
12500 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
12501 {
12502 int ret;
12503
12504 mutex_lock(&shares_mutex);
12505 if (tg_is_idle(tg))
12506 ret = -EINVAL;
12507 else
12508 ret = __sched_group_set_shares(tg, shares);
12509 mutex_unlock(&shares_mutex);
12510
12511 return ret;
12512 }
12513
12514 int sched_group_set_idle(struct task_group *tg, long idle)
12515 {
12516 int i;
12517
12518 if (tg == &root_task_group)
12519 return -EINVAL;
12520
12521 if (idle < 0 || idle > 1)
12522 return -EINVAL;
12523
12524 mutex_lock(&shares_mutex);
12525
12526 if (tg->idle == idle) {
12527 mutex_unlock(&shares_mutex);
12528 return 0;
12529 }
12530
12531 tg->idle = idle;
12532
12533 for_each_possible_cpu(i) {
12534 struct rq *rq = cpu_rq(i);
12535 struct sched_entity *se = tg->se[i];
12536 struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
12537 bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
12538 long idle_task_delta;
12539 struct rq_flags rf;
12540
12541 rq_lock_irqsave(rq, &rf);
12542
12543 grp_cfs_rq->idle = idle;
12544 if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
12545 goto next_cpu;
12546
12547 if (se->on_rq) {
12548 parent_cfs_rq = cfs_rq_of(se);
12549 if (cfs_rq_is_idle(grp_cfs_rq))
12550 parent_cfs_rq->idle_nr_running++;
12551 else
12552 parent_cfs_rq->idle_nr_running--;
12553 }
12554
12555 idle_task_delta = grp_cfs_rq->h_nr_running -
12556 grp_cfs_rq->idle_h_nr_running;
12557 if (!cfs_rq_is_idle(grp_cfs_rq))
12558 idle_task_delta *= -1;
12559
12560 for_each_sched_entity(se) {
12561 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12562
12563 if (!se->on_rq)
12564 break;
12565
12566 cfs_rq->idle_h_nr_running += idle_task_delta;
12567
12568 /* Already accounted at parent level and above. */
12569 if (cfs_rq_is_idle(cfs_rq))
12570 break;
12571 }
12572
12573 next_cpu:
12574 rq_unlock_irqrestore(rq, &rf);
12575 }
12576
12577 /* Idle groups have minimum weight. */
12578 if (tg_is_idle(tg))
12579 __sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
12580 else
12581 __sched_group_set_shares(tg, NICE_0_LOAD);
12582
12583 mutex_unlock(&shares_mutex);
12584 return 0;
12585 }
12586
12587 #else /* CONFIG_FAIR_GROUP_SCHED */
12588
12589 void free_fair_sched_group(struct task_group *tg) { }
12590
12591 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
12592 {
12593 return 1;
12594 }
12595
12596 void online_fair_sched_group(struct task_group *tg) { }
12597
12598 void unregister_fair_sched_group(struct task_group *tg) { }
12599
12600 #endif /* CONFIG_FAIR_GROUP_SCHED */
12601
12602
12603 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
12604 {
12605 struct sched_entity *se = &task->se;
12606 unsigned int rr_interval = 0;
12607
12608 /*
12609 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
12610 * idle runqueue:
12611 */
12612 if (rq->cfs.load.weight)
12613 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
12614
12615 return rr_interval;
12616 }
12617
12618 /*
12619 * All the scheduling class methods:
12620 */
12621 DEFINE_SCHED_CLASS(fair) = {
12622
12623 .enqueue_task = enqueue_task_fair,
12624 .dequeue_task = dequeue_task_fair,
12625 .yield_task = yield_task_fair,
12626 .yield_to_task = yield_to_task_fair,
12627
12628 .check_preempt_curr = check_preempt_wakeup,
12629
12630 .pick_next_task = __pick_next_task_fair,
12631 .put_prev_task = put_prev_task_fair,
12632 .set_next_task = set_next_task_fair,
12633
12634 #ifdef CONFIG_SMP
12635 .balance = balance_fair,
12636 .pick_task = pick_task_fair,
12637 .select_task_rq = select_task_rq_fair,
12638 .migrate_task_rq = migrate_task_rq_fair,
12639
12640 .rq_online = rq_online_fair,
12641 .rq_offline = rq_offline_fair,
12642
12643 .task_dead = task_dead_fair,
12644 .set_cpus_allowed = set_cpus_allowed_common,
12645 #endif
12646
12647 .task_tick = task_tick_fair,
12648 .task_fork = task_fork_fair,
12649
12650 .prio_changed = prio_changed_fair,
12651 .switched_from = switched_from_fair,
12652 .switched_to = switched_to_fair,
12653
12654 .get_rr_interval = get_rr_interval_fair,
12655
12656 .update_curr = update_curr_fair,
12657
12658 #ifdef CONFIG_FAIR_GROUP_SCHED
12659 .task_change_group = task_change_group_fair,
12660 #endif
12661
12662 #ifdef CONFIG_UCLAMP_TASK
12663 .uclamp_enabled = 1,
12664 #endif
12665 };
12666
12667 #ifdef CONFIG_SCHED_DEBUG
12668 void print_cfs_stats(struct seq_file *m, int cpu)
12669 {
12670 struct cfs_rq *cfs_rq, *pos;
12671
12672 rcu_read_lock();
12673 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
12674 print_cfs_rq(m, cpu, cfs_rq);
12675 rcu_read_unlock();
12676 }
12677
12678 #ifdef CONFIG_NUMA_BALANCING
12679 void show_numa_stats(struct task_struct *p, struct seq_file *m)
12680 {
12681 int node;
12682 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
12683 struct numa_group *ng;
12684
12685 rcu_read_lock();
12686 ng = rcu_dereference(p->numa_group);
12687 for_each_online_node(node) {
12688 if (p->numa_faults) {
12689 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
12690 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
12691 }
12692 if (ng) {
12693 gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
12694 gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
12695 }
12696 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
12697 }
12698 rcu_read_unlock();
12699 }
12700 #endif /* CONFIG_NUMA_BALANCING */
12701 #endif /* CONFIG_SCHED_DEBUG */
12702
12703 __init void init_sched_fair_class(void)
12704 {
12705 #ifdef CONFIG_SMP
12706 int i;
12707
12708 for_each_possible_cpu(i) {
12709 zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));
12710 zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i));
12711
12712 #ifdef CONFIG_CFS_BANDWIDTH
12713 INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
12714 INIT_LIST_HEAD(&cpu_rq(i)->cfsb_csd_list);
12715 #endif
12716 }
12717
12718 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
12719
12720 #ifdef CONFIG_NO_HZ_COMMON
12721 nohz.next_balance = jiffies;
12722 nohz.next_blocked = jiffies;
12723 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
12724 #endif
12725 #endif /* SMP */
12726
12727 }