// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
#include <linux/hugetlb_inline.h>
#include <linux/jiffies.h>
#include <linux/mm_api.h>
#include <linux/highmem.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/isolation.h>
#include <linux/sched/nohz.h>

#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/memory-tiers.h>
#include <linux/mempolicy.h>
#include <linux/mutex_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>

#include <asm/switch_to.h>

#include "sched.h"
#include "stats.h"
#include "autogroup.h"
/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency			= 6000000ULL;
static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;
/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
 */
unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
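
/*
 * Illustrative example of the scaling above: with the default
 * SCHED_TUNABLESCALING_LOG on an 8-CPU system the factor is
 * 1 + ilog2(8) = 4, so the effective sched_latency becomes 24ms and the
 * minimum granularity below becomes 3ms.
 */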
/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity			= 750000ULL;
static unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;

/*
 * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
 * Applies only when SCHED_IDLE tasks compete with normal tasks.
 *
 * (default: 0.75 msec)
 */
unsigned int sysctl_sched_idle_min_granularity			= 750000ULL;
/*
 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
 * SCHED_OTHER wake-up granularity.
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 *
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_wakeup_granularity			= 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity	= 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;

int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
	int _shift = 0;

	if (kstrtoint(str, 0, &_shift))
		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");

	sched_thermal_decay_shift = clamp(_shift, 0, 10);
	return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
/*
 * For asym packing, by default the lower numbered CPU has higher priority.
 */
int __weak arch_asym_cpu_priority(int cpu)
{
	return -cpu;
}

/*
 * The margin used when comparing utilization with CPU capacity.
 *
 * (default: ~20%)
 */
#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

/*
 * The margin used when comparing CPU capacities.
 * is 'cap1' noticeably greater than 'cap2'
 *
 * (default: ~5%)
 */
#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
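
/*
 * Illustrative example of the two margins: fits_capacity(800, 1024) is true
 * because 800 * 1280 = 1024000 is below 1024 * 1024 = 1048576, i.e.
 * utilization "fits" while it stays under roughly 80% of the capacity, and
 * capacity_greater() only reports cap1 as greater once it exceeds cap2 by
 * about 5%.
 */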
#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
static unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
#endif
#ifdef CONFIG_NUMA_BALANCING
/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#endif
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_fair_sysctls[] = {
	{
		.procname	= "sched_child_runs_first",
		.data		= &sysctl_sched_child_runs_first,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.procname	= "sched_cfs_bandwidth_slice_us",
		.data		= &sysctl_sched_cfs_bandwidth_slice,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
#endif
#ifdef CONFIG_NUMA_BALANCING
	{
		.procname	= "numa_balancing_promote_rate_limit_MBps",
		.data		= &sysctl_numa_balancing_promote_rate_limit,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
#endif /* CONFIG_NUMA_BALANCING */
	{}
};

static int __init sched_fair_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_fair_sysctls);
	return 0;
}
late_initcall(sched_fair_sysctl_init);
#endif /* CONFIG_SYSCTL */
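
/*
 * The table above is registered under "kernel", so the knobs appear as e.g.
 * /proc/sys/kernel/sched_child_runs_first and, with CFS bandwidth enabled,
 * /proc/sys/kernel/sched_cfs_bandwidth_slice_us.
 */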
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}
/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void __init sched_init_granularity(void)
{
	update_sysctl();
}
#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}
/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	u32 fact_hi = (u32)(fact >> 32);
	int shift = WMULT_SHIFT;
	int fs;

	__update_inv_weight(lw);

	if (unlikely(fact_hi)) {
		fs = fls(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	fact = mul_u32_u32(fact, lw->inv_weight);

	fact_hi = (u32)(fact >> 32);
	if (fact_hi) {
		fs = fls(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}
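
/*
 * Illustrative example of the math above: for a nice-0 entity (load weight
 * 1024 after scale_load_down()) on a runqueue whose total weight is 2048,
 * __calc_delta() scales a 6ms interval down to roughly 3ms, i.e.
 * delta_exec * weight / lw.weight, without a 64-bit division.
 */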
const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED
359 /* Walk up scheduling entities hierarchy */
360 #define for_each_sched_entity(se) \
361 for (; se; se = se->parent)
363 static inline bool list_add_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
365 struct rq
*rq
= rq_of(cfs_rq
);
366 int cpu
= cpu_of(rq
);
369 return rq
->tmp_alone_branch
== &rq
->leaf_cfs_rq_list
;
374 * Ensure we either appear before our parent (if already
375 * enqueued) or force our parent to appear after us when it is
376 * enqueued. The fact that we always enqueue bottom-up
377 * reduces this to two cases and a special case for the root
378 * cfs_rq. Furthermore, it also means that we will always reset
379 * tmp_alone_branch either when the branch is connected
380 * to a tree or when we reach the top of the tree
382 if (cfs_rq
->tg
->parent
&&
383 cfs_rq
->tg
->parent
->cfs_rq
[cpu
]->on_list
) {
385 * If parent is already on the list, we add the child
386 * just before. Thanks to circular linked property of
387 * the list, this means to put the child at the tail
388 * of the list that starts by parent.
390 list_add_tail_rcu(&cfs_rq
->leaf_cfs_rq_list
,
391 &(cfs_rq
->tg
->parent
->cfs_rq
[cpu
]->leaf_cfs_rq_list
));
393 * The branch is now connected to its tree so we can
394 * reset tmp_alone_branch to the beginning of the
397 rq
->tmp_alone_branch
= &rq
->leaf_cfs_rq_list
;
401 if (!cfs_rq
->tg
->parent
) {
403 * cfs rq without parent should be put
404 * at the tail of the list.
406 list_add_tail_rcu(&cfs_rq
->leaf_cfs_rq_list
,
407 &rq
->leaf_cfs_rq_list
);
409 * We have reach the top of a tree so we can reset
410 * tmp_alone_branch to the beginning of the list.
412 rq
->tmp_alone_branch
= &rq
->leaf_cfs_rq_list
;
417 * The parent has not already been added so we want to
418 * make sure that it will be put after us.
419 * tmp_alone_branch points to the begin of the branch
420 * where we will add parent.
422 list_add_rcu(&cfs_rq
->leaf_cfs_rq_list
, rq
->tmp_alone_branch
);
424 * update tmp_alone_branch to points to the new begin
427 rq
->tmp_alone_branch
= &cfs_rq
->leaf_cfs_rq_list
;
431 static inline void list_del_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
433 if (cfs_rq
->on_list
) {
434 struct rq
*rq
= rq_of(cfs_rq
);
437 * With cfs_rq being unthrottled/throttled during an enqueue,
438 * it can happen the tmp_alone_branch points the a leaf that
439 * we finally want to del. In this case, tmp_alone_branch moves
440 * to the prev element but it will point to rq->leaf_cfs_rq_list
441 * at the end of the enqueue.
443 if (rq
->tmp_alone_branch
== &cfs_rq
->leaf_cfs_rq_list
)
444 rq
->tmp_alone_branch
= cfs_rq
->leaf_cfs_rq_list
.prev
;
446 list_del_rcu(&cfs_rq
->leaf_cfs_rq_list
);
451 static inline void assert_list_leaf_cfs_rq(struct rq
*rq
)
453 SCHED_WARN_ON(rq
->tmp_alone_branch
!= &rq
->leaf_cfs_rq_list
);
456 /* Iterate thr' all leaf cfs_rq's on a runqueue */
457 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
458 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
461 /* Do the two (enqueued) entities belong to the same group ? */
462 static inline struct cfs_rq
*
463 is_same_group(struct sched_entity
*se
, struct sched_entity
*pse
)
465 if (se
->cfs_rq
== pse
->cfs_rq
)
471 static inline struct sched_entity
*parent_entity(const struct sched_entity
*se
)
477 find_matching_se(struct sched_entity
**se
, struct sched_entity
**pse
)
479 int se_depth
, pse_depth
;
482 * preemption test can be made between sibling entities who are in the
483 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
484 * both tasks until we find their ancestors who are siblings of common
488 /* First walk up until both entities are at same depth */
489 se_depth
= (*se
)->depth
;
490 pse_depth
= (*pse
)->depth
;
492 while (se_depth
> pse_depth
) {
494 *se
= parent_entity(*se
);
497 while (pse_depth
> se_depth
) {
499 *pse
= parent_entity(*pse
);
502 while (!is_same_group(*se
, *pse
)) {
503 *se
= parent_entity(*se
);
504 *pse
= parent_entity(*pse
);
508 static int tg_is_idle(struct task_group
*tg
)
513 static int cfs_rq_is_idle(struct cfs_rq
*cfs_rq
)
515 return cfs_rq
->idle
> 0;
518 static int se_is_idle(struct sched_entity
*se
)
520 if (entity_is_task(se
))
521 return task_has_idle_policy(task_of(se
));
522 return cfs_rq_is_idle(group_cfs_rq(se
));
525 #else /* !CONFIG_FAIR_GROUP_SCHED */
527 #define for_each_sched_entity(se) \
528 for (; se; se = NULL)
530 static inline bool list_add_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
535 static inline void list_del_leaf_cfs_rq(struct cfs_rq
*cfs_rq
)
539 static inline void assert_list_leaf_cfs_rq(struct rq
*rq
)
543 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
544 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
546 static inline struct sched_entity
*parent_entity(struct sched_entity
*se
)
552 find_matching_se(struct sched_entity
**se
, struct sched_entity
**pse
)
556 static inline int tg_is_idle(struct task_group
*tg
)
561 static int cfs_rq_is_idle(struct cfs_rq
*cfs_rq
)
566 static int se_is_idle(struct sched_entity
*se
)
571 #endif /* CONFIG_FAIR_GROUP_SCHED */
static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline bool entity_before(const struct sched_entity *a,
				 const struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}
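
/*
 * Note that all three helpers above compare vruntimes via a signed
 * difference rather than with '<' directly, so the ordering stays correct
 * even once the unsigned 64-bit vruntime counters eventually wrap around.
 */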
604 #define __node_2_se(node) \
605 rb_entry((node), struct sched_entity, run_node)
607 static void update_min_vruntime(struct cfs_rq
*cfs_rq
)
609 struct sched_entity
*curr
= cfs_rq
->curr
;
610 struct rb_node
*leftmost
= rb_first_cached(&cfs_rq
->tasks_timeline
);
612 u64 vruntime
= cfs_rq
->min_vruntime
;
616 vruntime
= curr
->vruntime
;
621 if (leftmost
) { /* non-empty tree */
622 struct sched_entity
*se
= __node_2_se(leftmost
);
625 vruntime
= se
->vruntime
;
627 vruntime
= min_vruntime(vruntime
, se
->vruntime
);
630 /* ensure we never gain time by being placed backwards. */
631 u64_u32_store(cfs_rq
->min_vruntime
,
632 max_vruntime(cfs_rq
->min_vruntime
, vruntime
));
635 static inline bool __entity_less(struct rb_node
*a
, const struct rb_node
*b
)
637 return entity_before(__node_2_se(a
), __node_2_se(b
));
641 * Enqueue an entity into the rb-tree:
643 static void __enqueue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
645 rb_add_cached(&se
->run_node
, &cfs_rq
->tasks_timeline
, __entity_less
);
648 static void __dequeue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
650 rb_erase_cached(&se
->run_node
, &cfs_rq
->tasks_timeline
);
653 struct sched_entity
*__pick_first_entity(struct cfs_rq
*cfs_rq
)
655 struct rb_node
*left
= rb_first_cached(&cfs_rq
->tasks_timeline
);
660 return __node_2_se(left
);
663 static struct sched_entity
*__pick_next_entity(struct sched_entity
*se
)
665 struct rb_node
*next
= rb_next(&se
->run_node
);
670 return __node_2_se(next
);
673 #ifdef CONFIG_SCHED_DEBUG
674 struct sched_entity
*__pick_last_entity(struct cfs_rq
*cfs_rq
)
676 struct rb_node
*last
= rb_last(&cfs_rq
->tasks_timeline
.rb_root
);
681 return __node_2_se(last
);
/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_update_scaling(void)
{
	unsigned int factor = get_update_sysctl_factor();

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}
/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}
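
/*
 * Illustrative example with the default tunables (6ms latency, 0.75ms
 * minimum granularity, hence sched_nr_latency == 8): 5 runnable tasks keep
 * the 6ms period, while 12 runnable tasks stretch it to 12 * 0.75ms = 9ms.
 */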
static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned int nr_running = cfs_rq->nr_running;
	struct sched_entity *init_se = se;
	unsigned int min_gran;
	u64 slice;

	if (sched_feat(ALT_PERIOD))
		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;

	slice = __sched_period(nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;
		struct cfs_rq *qcfs_rq;

		qcfs_rq = cfs_rq_of(se);
		load = &qcfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = qcfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = __calc_delta(slice, se->load.weight, load);
	}

	if (sched_feat(BASE_SLICE)) {
		if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq))
			min_gran = sysctl_sched_idle_min_granularity;
		else
			min_gran = sysctl_sched_min_granularity;

		slice = max_t(u64, slice, min_gran);
	}

	return slice;
}
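
/*
 * Illustrative example: with three runnable nice-0 tasks (equal weight) and
 * the default tunables, the period is 6ms and each task's wall-time slice
 * works out to roughly 2ms.
 */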
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
795 static int select_idle_sibling(struct task_struct
*p
, int prev_cpu
, int cpu
);
796 static unsigned long task_h_load(struct task_struct
*p
);
797 static unsigned long capacity_of(int cpu
);
799 /* Give new sched_entity start runnable values to heavy its load in infant time */
800 void init_entity_runnable_average(struct sched_entity
*se
)
802 struct sched_avg
*sa
= &se
->avg
;
804 memset(sa
, 0, sizeof(*sa
));
807 * Tasks are initialized with full load to be seen as heavy tasks until
808 * they get a chance to stabilize to their real load level.
809 * Group entities are initialized with zero load to reflect the fact that
810 * nothing has been attached to the task group yet.
812 if (entity_is_task(se
))
813 sa
->load_avg
= scale_load_down(se
->load
.weight
);
815 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
819 * With new tasks being created, their initial util_avgs are extrapolated
820 * based on the cfs_rq's current util_avg:
822 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
824 * However, in many cases, the above util_avg does not give a desired
825 * value. Moreover, the sum of the util_avgs may be divergent, such
826 * as when the series is a harmonic series.
828 * To solve this problem, we also cap the util_avg of successive tasks to
829 * only 1/2 of the left utilization budget:
831 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
833 * where n denotes the nth task and cpu_scale the CPU capacity.
835 * For example, for a CPU with 1024 of capacity, a simplest series from
836 * the beginning would be like:
838 * task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
839 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
841 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
842 * if util_avg > util_avg_cap.
844 void post_init_entity_util_avg(struct task_struct
*p
)
846 struct sched_entity
*se
= &p
->se
;
847 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
848 struct sched_avg
*sa
= &se
->avg
;
849 long cpu_scale
= arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq
)));
850 long cap
= (long)(cpu_scale
- cfs_rq
->avg
.util_avg
) / 2;
852 if (p
->sched_class
!= &fair_sched_class
) {
854 * For !fair tasks do:
856 update_cfs_rq_load_avg(now, cfs_rq);
857 attach_entity_load_avg(cfs_rq, se);
858 switched_from_fair(rq, p);
860 * such that the next switched_to_fair() has the
863 se
->avg
.last_update_time
= cfs_rq_clock_pelt(cfs_rq
);
868 if (cfs_rq
->avg
.util_avg
!= 0) {
869 sa
->util_avg
= cfs_rq
->avg
.util_avg
* se
->load
.weight
;
870 sa
->util_avg
/= (cfs_rq
->avg
.load_avg
+ 1);
872 if (sa
->util_avg
> cap
)
879 sa
->runnable_avg
= sa
->util_avg
;
882 #else /* !CONFIG_SMP */
883 void init_entity_runnable_average(struct sched_entity
*se
)
886 void post_init_entity_util_avg(struct task_struct
*p
)
889 static void update_tg_load_avg(struct cfs_rq
*cfs_rq
)
892 #endif /* CONFIG_SMP */
/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	if (schedstat_enabled()) {
		struct sched_statistics *stats;

		stats = __schedstats_from_se(curr);
		__schedstat_set(stats->exec_max,
				max(delta_exec, stats->exec_max));
	}

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq->exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cgroup_account_cputime(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}
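
/*
 * Note that delta_exec is weighted before it is added to vruntime: a nice-0
 * task (weight 1024) accrues vruntime at wall-clock rate, while for example
 * a nice -5 task (weight 3121) accrues it roughly three times more slowly,
 * which is what gives heavier tasks a proportionally larger CPU share.
 */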
937 static void update_curr_fair(struct rq
*rq
)
939 update_curr(cfs_rq_of(&rq
->curr
->se
));
943 update_stats_wait_start_fair(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
945 struct sched_statistics
*stats
;
946 struct task_struct
*p
= NULL
;
948 if (!schedstat_enabled())
951 stats
= __schedstats_from_se(se
);
953 if (entity_is_task(se
))
956 __update_stats_wait_start(rq_of(cfs_rq
), p
, stats
);
960 update_stats_wait_end_fair(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
962 struct sched_statistics
*stats
;
963 struct task_struct
*p
= NULL
;
965 if (!schedstat_enabled())
968 stats
= __schedstats_from_se(se
);
971 * When the sched_schedstat changes from 0 to 1, some sched se
972 * maybe already in the runqueue, the se->statistics.wait_start
973 * will be 0.So it will let the delta wrong. We need to avoid this
976 if (unlikely(!schedstat_val(stats
->wait_start
)))
979 if (entity_is_task(se
))
982 __update_stats_wait_end(rq_of(cfs_rq
), p
, stats
);
986 update_stats_enqueue_sleeper_fair(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
988 struct sched_statistics
*stats
;
989 struct task_struct
*tsk
= NULL
;
991 if (!schedstat_enabled())
994 stats
= __schedstats_from_se(se
);
996 if (entity_is_task(se
))
999 __update_stats_enqueue_sleeper(rq_of(cfs_rq
), tsk
, stats
);
1003 * Task is being enqueued - update stats:
1006 update_stats_enqueue_fair(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
1008 if (!schedstat_enabled())
1012 * Are we enqueueing a waiting task? (for current tasks
1013 * a dequeue/enqueue event is a NOP)
1015 if (se
!= cfs_rq
->curr
)
1016 update_stats_wait_start_fair(cfs_rq
, se
);
1018 if (flags
& ENQUEUE_WAKEUP
)
1019 update_stats_enqueue_sleeper_fair(cfs_rq
, se
);
1023 update_stats_dequeue_fair(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
1026 if (!schedstat_enabled())
1030 * Mark the end of the wait period if dequeueing a
1033 if (se
!= cfs_rq
->curr
)
1034 update_stats_wait_end_fair(cfs_rq
, se
);
1036 if ((flags
& DEQUEUE_SLEEP
) && entity_is_task(se
)) {
1037 struct task_struct
*tsk
= task_of(se
);
1040 /* XXX racy against TTWU */
1041 state
= READ_ONCE(tsk
->__state
);
1042 if (state
& TASK_INTERRUPTIBLE
)
1043 __schedstat_set(tsk
->stats
.sleep_start
,
1044 rq_clock(rq_of(cfs_rq
)));
1045 if (state
& TASK_UNINTERRUPTIBLE
)
1046 __schedstat_set(tsk
->stats
.block_start
,
1047 rq_clock(rq_of(cfs_rq
)));
1052 * We are picking a new current task - update its stats:
1055 update_stats_curr_start(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
1058 * We are starting a new run period:
1060 se
->exec_start
= rq_clock_task(rq_of(cfs_rq
));
1063 /**************************************************
1064 * Scheduling class queueing methods:
1068 #define NUMA_IMBALANCE_MIN 2
1071 adjust_numa_imbalance(int imbalance
, int dst_running
, int imb_numa_nr
)
1074 * Allow a NUMA imbalance if busy CPUs is less than the maximum
1075 * threshold. Above this threshold, individual tasks may be contending
1076 * for both memory bandwidth and any shared HT resources. This is an
1077 * approximation as the number of running tasks may not be related to
1078 * the number of busy CPUs due to sched_setaffinity.
1080 if (dst_running
> imb_numa_nr
)
1084 * Allow a small imbalance based on a simple pair of communicating
1085 * tasks that remain local when the destination is lightly loaded.
1087 if (imbalance
<= NUMA_IMBALANCE_MIN
)
1092 #endif /* CONFIG_NUMA */
1094 #ifdef CONFIG_NUMA_BALANCING
1096 * Approximate time to scan a full NUMA task in ms. The task scan period is
1097 * calculated based on the tasks virtual memory size and
1098 * numa_balancing_scan_size.
1100 unsigned int sysctl_numa_balancing_scan_period_min
= 1000;
1101 unsigned int sysctl_numa_balancing_scan_period_max
= 60000;
1103 /* Portion of address space to scan in MB */
1104 unsigned int sysctl_numa_balancing_scan_size
= 256;
1106 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1107 unsigned int sysctl_numa_balancing_scan_delay
= 1000;
1109 /* The page with hint page fault latency < threshold in ms is considered hot */
1110 unsigned int sysctl_numa_balancing_hot_threshold
= MSEC_PER_SEC
;
1113 refcount_t refcount
;
1115 spinlock_t lock
; /* nr_tasks, tasks */
1120 struct rcu_head rcu
;
1121 unsigned long total_faults
;
1122 unsigned long max_faults_cpu
;
1124 * faults[] array is split into two regions: faults_mem and faults_cpu.
1126 * Faults_cpu is used to decide whether memory should move
1127 * towards the CPU. As a consequence, these stats are weighted
1128 * more by CPU use than by memory faults.
1130 unsigned long faults
[];
1134 * For functions that can be called in multiple contexts that permit reading
1135 * ->numa_group (see struct task_struct for locking rules).
1137 static struct numa_group
*deref_task_numa_group(struct task_struct
*p
)
1139 return rcu_dereference_check(p
->numa_group
, p
== current
||
1140 (lockdep_is_held(__rq_lockp(task_rq(p
))) && !READ_ONCE(p
->on_cpu
)));
1143 static struct numa_group
*deref_curr_numa_group(struct task_struct
*p
)
1145 return rcu_dereference_protected(p
->numa_group
, p
== current
);
1148 static inline unsigned long group_faults_priv(struct numa_group
*ng
);
1149 static inline unsigned long group_faults_shared(struct numa_group
*ng
);
static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}
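
/*
 * Illustrative example: with the default 256MB scan size and 4KB pages, one
 * window covers 65536 pages, so a task with a 1GB RSS is split into 4 scan
 * windows.
 */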
1170 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1171 #define MAX_SCAN_WINDOW 2560
1173 static unsigned int task_scan_min(struct task_struct
*p
)
1175 unsigned int scan_size
= READ_ONCE(sysctl_numa_balancing_scan_size
);
1176 unsigned int scan
, floor
;
1177 unsigned int windows
= 1;
1179 if (scan_size
< MAX_SCAN_WINDOW
)
1180 windows
= MAX_SCAN_WINDOW
/ scan_size
;
1181 floor
= 1000 / windows
;
1183 scan
= sysctl_numa_balancing_scan_period_min
/ task_nr_scan_windows(p
);
1184 return max_t(unsigned int, floor
, scan
);
1187 static unsigned int task_scan_start(struct task_struct
*p
)
1189 unsigned long smin
= task_scan_min(p
);
1190 unsigned long period
= smin
;
1191 struct numa_group
*ng
;
1193 /* Scale the maximum scan period with the amount of shared memory. */
1195 ng
= rcu_dereference(p
->numa_group
);
1197 unsigned long shared
= group_faults_shared(ng
);
1198 unsigned long private = group_faults_priv(ng
);
1200 period
*= refcount_read(&ng
->refcount
);
1201 period
*= shared
+ 1;
1202 period
/= private + shared
+ 1;
1206 return max(smin
, period
);
1209 static unsigned int task_scan_max(struct task_struct
*p
)
1211 unsigned long smin
= task_scan_min(p
);
1213 struct numa_group
*ng
;
1215 /* Watch for min being lower than max due to floor calculations */
1216 smax
= sysctl_numa_balancing_scan_period_max
/ task_nr_scan_windows(p
);
1218 /* Scale the maximum scan period with the amount of shared memory. */
1219 ng
= deref_curr_numa_group(p
);
1221 unsigned long shared
= group_faults_shared(ng
);
1222 unsigned long private = group_faults_priv(ng
);
1223 unsigned long period
= smax
;
1225 period
*= refcount_read(&ng
->refcount
);
1226 period
*= shared
+ 1;
1227 period
/= private + shared
+ 1;
1229 smax
= max(smax
, period
);
1232 return max(smin
, smax
);
1235 static void account_numa_enqueue(struct rq
*rq
, struct task_struct
*p
)
1237 rq
->nr_numa_running
+= (p
->numa_preferred_nid
!= NUMA_NO_NODE
);
1238 rq
->nr_preferred_running
+= (p
->numa_preferred_nid
== task_node(p
));
1241 static void account_numa_dequeue(struct rq
*rq
, struct task_struct
*p
)
1243 rq
->nr_numa_running
-= (p
->numa_preferred_nid
!= NUMA_NO_NODE
);
1244 rq
->nr_preferred_running
-= (p
->numa_preferred_nid
== task_node(p
));
1247 /* Shared or private faults. */
1248 #define NR_NUMA_HINT_FAULT_TYPES 2
1250 /* Memory and CPU locality */
1251 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1253 /* Averaged statistics, and temporary buffers. */
1254 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1256 pid_t
task_numa_group_id(struct task_struct
*p
)
1258 struct numa_group
*ng
;
1262 ng
= rcu_dereference(p
->numa_group
);
1271 * The averaged statistics, shared & private, memory & CPU,
1272 * occupy the first half of the array. The second half of the
1273 * array is for current counters, which are averaged into the
1274 * first set by task_numa_placement.
1276 static inline int task_faults_idx(enum numa_faults_stats s
, int nid
, int priv
)
1278 return NR_NUMA_HINT_FAULT_TYPES
* (s
* nr_node_ids
+ nid
) + priv
;
1281 static inline unsigned long task_faults(struct task_struct
*p
, int nid
)
1283 if (!p
->numa_faults
)
1286 return p
->numa_faults
[task_faults_idx(NUMA_MEM
, nid
, 0)] +
1287 p
->numa_faults
[task_faults_idx(NUMA_MEM
, nid
, 1)];
1290 static inline unsigned long group_faults(struct task_struct
*p
, int nid
)
1292 struct numa_group
*ng
= deref_task_numa_group(p
);
1297 return ng
->faults
[task_faults_idx(NUMA_MEM
, nid
, 0)] +
1298 ng
->faults
[task_faults_idx(NUMA_MEM
, nid
, 1)];
1301 static inline unsigned long group_faults_cpu(struct numa_group
*group
, int nid
)
1303 return group
->faults
[task_faults_idx(NUMA_CPU
, nid
, 0)] +
1304 group
->faults
[task_faults_idx(NUMA_CPU
, nid
, 1)];
1307 static inline unsigned long group_faults_priv(struct numa_group
*ng
)
1309 unsigned long faults
= 0;
1312 for_each_online_node(node
) {
1313 faults
+= ng
->faults
[task_faults_idx(NUMA_MEM
, node
, 1)];
1319 static inline unsigned long group_faults_shared(struct numa_group
*ng
)
1321 unsigned long faults
= 0;
1324 for_each_online_node(node
) {
1325 faults
+= ng
->faults
[task_faults_idx(NUMA_MEM
, node
, 0)];
1332 * A node triggering more than 1/3 as many NUMA faults as the maximum is
1333 * considered part of a numa group's pseudo-interleaving set. Migrations
1334 * between these nodes are slowed down, to allow things to settle down.
1336 #define ACTIVE_NODE_FRACTION 3
1338 static bool numa_is_active_node(int nid
, struct numa_group
*ng
)
1340 return group_faults_cpu(ng
, nid
) * ACTIVE_NODE_FRACTION
> ng
->max_faults_cpu
;
1343 /* Handle placement on systems where not all nodes are directly connected. */
1344 static unsigned long score_nearby_nodes(struct task_struct
*p
, int nid
,
1345 int lim_dist
, bool task
)
1347 unsigned long score
= 0;
1351 * All nodes are directly connected, and the same distance
1352 * from each other. No need for fancy placement algorithms.
1354 if (sched_numa_topology_type
== NUMA_DIRECT
)
1357 /* sched_max_numa_distance may be changed in parallel. */
1358 max_dist
= READ_ONCE(sched_max_numa_distance
);
1360 * This code is called for each node, introducing N^2 complexity,
1361 * which should be ok given the number of nodes rarely exceeds 8.
1363 for_each_online_node(node
) {
1364 unsigned long faults
;
1365 int dist
= node_distance(nid
, node
);
1368 * The furthest away nodes in the system are not interesting
1369 * for placement; nid was already counted.
1371 if (dist
>= max_dist
|| node
== nid
)
1375 * On systems with a backplane NUMA topology, compare groups
1376 * of nodes, and move tasks towards the group with the most
1377 * memory accesses. When comparing two nodes at distance
1378 * "hoplimit", only nodes closer by than "hoplimit" are part
1379 * of each group. Skip other nodes.
1381 if (sched_numa_topology_type
== NUMA_BACKPLANE
&& dist
>= lim_dist
)
1384 /* Add up the faults from nearby nodes. */
1386 faults
= task_faults(p
, node
);
1388 faults
= group_faults(p
, node
);
1391 * On systems with a glueless mesh NUMA topology, there are
1392 * no fixed "groups of nodes". Instead, nodes that are not
1393 * directly connected bounce traffic through intermediate
1394 * nodes; a numa_group can occupy any set of nodes.
1395 * The further away a node is, the less the faults count.
1396 * This seems to result in good task placement.
1398 if (sched_numa_topology_type
== NUMA_GLUELESS_MESH
) {
1399 faults
*= (max_dist
- dist
);
1400 faults
/= (max_dist
- LOCAL_DISTANCE
);
1410 * These return the fraction of accesses done by a particular task, or
1411 * task group, on a particular numa node. The group weight is given a
1412 * larger multiplier, in order to group tasks together that are almost
1413 * evenly spread out between numa nodes.
1415 static inline unsigned long task_weight(struct task_struct
*p
, int nid
,
1418 unsigned long faults
, total_faults
;
1420 if (!p
->numa_faults
)
1423 total_faults
= p
->total_numa_faults
;
1428 faults
= task_faults(p
, nid
);
1429 faults
+= score_nearby_nodes(p
, nid
, dist
, true);
1431 return 1000 * faults
/ total_faults
;
1434 static inline unsigned long group_weight(struct task_struct
*p
, int nid
,
1437 struct numa_group
*ng
= deref_task_numa_group(p
);
1438 unsigned long faults
, total_faults
;
1443 total_faults
= ng
->total_faults
;
1448 faults
= group_faults(p
, nid
);
1449 faults
+= score_nearby_nodes(p
, nid
, dist
, false);
1451 return 1000 * faults
/ total_faults
;
1455 * If memory tiering mode is enabled, cpupid of slow memory page is
1456 * used to record scan time instead of CPU and PID. When tiering mode
1457 * is disabled at run time, the scan time (in cpupid) will be
1458 * interpreted as CPU and PID. So CPU needs to be checked to avoid to
1459 * access out of array bound.
1461 static inline bool cpupid_valid(int cpupid
)
1463 return cpupid_to_cpu(cpupid
) < nr_cpu_ids
;
1467 * For memory tiering mode, if there are enough free pages (more than
1468 * enough watermark defined here) in fast memory node, to take full
1469 * advantage of fast memory capacity, all recently accessed slow
1470 * memory pages will be migrated to fast memory node without
1471 * considering hot threshold.
1473 static bool pgdat_free_space_enough(struct pglist_data
*pgdat
)
1476 unsigned long enough_wmark
;
1478 enough_wmark
= max(1UL * 1024 * 1024 * 1024 >> PAGE_SHIFT
,
1479 pgdat
->node_present_pages
>> 4);
1480 for (z
= pgdat
->nr_zones
- 1; z
>= 0; z
--) {
1481 struct zone
*zone
= pgdat
->node_zones
+ z
;
1483 if (!populated_zone(zone
))
1486 if (zone_watermark_ok(zone
, 0,
1487 wmark_pages(zone
, WMARK_PROMO
) + enough_wmark
,
1495 * For memory tiering mode, when page tables are scanned, the scan
1496 * time will be recorded in struct page in addition to make page
1497 * PROT_NONE for slow memory page. So when the page is accessed, in
1498 * hint page fault handler, the hint page fault latency is calculated
1501 * hint page fault latency = hint page fault time - scan time
1503 * The smaller the hint page fault latency, the higher the possibility
1504 * for the page to be hot.
1506 static int numa_hint_fault_latency(struct page
*page
)
1508 int last_time
, time
;
1510 time
= jiffies_to_msecs(jiffies
);
1511 last_time
= xchg_page_access_time(page
, time
);
1513 return (time
- last_time
) & PAGE_ACCESS_TIME_MASK
;
1517 * For memory tiering mode, too high promotion/demotion throughput may
1518 * hurt application latency. So we provide a mechanism to rate limit
1519 * the number of pages that are tried to be promoted.
1521 static bool numa_promotion_rate_limit(struct pglist_data
*pgdat
,
1522 unsigned long rate_limit
, int nr
)
1524 unsigned long nr_cand
;
1525 unsigned int now
, start
;
1527 now
= jiffies_to_msecs(jiffies
);
1528 mod_node_page_state(pgdat
, PGPROMOTE_CANDIDATE
, nr
);
1529 nr_cand
= node_page_state(pgdat
, PGPROMOTE_CANDIDATE
);
1530 start
= pgdat
->nbp_rl_start
;
1531 if (now
- start
> MSEC_PER_SEC
&&
1532 cmpxchg(&pgdat
->nbp_rl_start
, start
, now
) == start
)
1533 pgdat
->nbp_rl_nr_cand
= nr_cand
;
1534 if (nr_cand
- pgdat
->nbp_rl_nr_cand
>= rate_limit
)
1539 #define NUMA_MIGRATION_ADJUST_STEPS 16
1541 static void numa_promotion_adjust_threshold(struct pglist_data
*pgdat
,
1542 unsigned long rate_limit
,
1543 unsigned int ref_th
)
1545 unsigned int now
, start
, th_period
, unit_th
, th
;
1546 unsigned long nr_cand
, ref_cand
, diff_cand
;
1548 now
= jiffies_to_msecs(jiffies
);
1549 th_period
= sysctl_numa_balancing_scan_period_max
;
1550 start
= pgdat
->nbp_th_start
;
1551 if (now
- start
> th_period
&&
1552 cmpxchg(&pgdat
->nbp_th_start
, start
, now
) == start
) {
1553 ref_cand
= rate_limit
*
1554 sysctl_numa_balancing_scan_period_max
/ MSEC_PER_SEC
;
1555 nr_cand
= node_page_state(pgdat
, PGPROMOTE_CANDIDATE
);
1556 diff_cand
= nr_cand
- pgdat
->nbp_th_nr_cand
;
1557 unit_th
= ref_th
* 2 / NUMA_MIGRATION_ADJUST_STEPS
;
1558 th
= pgdat
->nbp_threshold
? : ref_th
;
1559 if (diff_cand
> ref_cand
* 11 / 10)
1560 th
= max(th
- unit_th
, unit_th
);
1561 else if (diff_cand
< ref_cand
* 9 / 10)
1562 th
= min(th
+ unit_th
, ref_th
* 2);
1563 pgdat
->nbp_th_nr_cand
= nr_cand
;
1564 pgdat
->nbp_threshold
= th
;
1568 bool should_numa_migrate_memory(struct task_struct
*p
, struct page
* page
,
1569 int src_nid
, int dst_cpu
)
1571 struct numa_group
*ng
= deref_curr_numa_group(p
);
1572 int dst_nid
= cpu_to_node(dst_cpu
);
1573 int last_cpupid
, this_cpupid
;
1576 * The pages in slow memory node should be migrated according
1577 * to hot/cold instead of private/shared.
1579 if (sysctl_numa_balancing_mode
& NUMA_BALANCING_MEMORY_TIERING
&&
1580 !node_is_toptier(src_nid
)) {
1581 struct pglist_data
*pgdat
;
1582 unsigned long rate_limit
;
1583 unsigned int latency
, th
, def_th
;
1585 pgdat
= NODE_DATA(dst_nid
);
1586 if (pgdat_free_space_enough(pgdat
)) {
1587 /* workload changed, reset hot threshold */
1588 pgdat
->nbp_threshold
= 0;
1592 def_th
= sysctl_numa_balancing_hot_threshold
;
1593 rate_limit
= sysctl_numa_balancing_promote_rate_limit
<< \
1595 numa_promotion_adjust_threshold(pgdat
, rate_limit
, def_th
);
1597 th
= pgdat
->nbp_threshold
? : def_th
;
1598 latency
= numa_hint_fault_latency(page
);
1602 return !numa_promotion_rate_limit(pgdat
, rate_limit
,
1603 thp_nr_pages(page
));
1606 this_cpupid
= cpu_pid_to_cpupid(dst_cpu
, current
->pid
);
1607 last_cpupid
= page_cpupid_xchg_last(page
, this_cpupid
);
1609 if (!(sysctl_numa_balancing_mode
& NUMA_BALANCING_MEMORY_TIERING
) &&
1610 !node_is_toptier(src_nid
) && !cpupid_valid(last_cpupid
))
1614 * Allow first faults or private faults to migrate immediately early in
1615 * the lifetime of a task. The magic number 4 is based on waiting for
1616 * two full passes of the "multi-stage node selection" test that is
1619 if ((p
->numa_preferred_nid
== NUMA_NO_NODE
|| p
->numa_scan_seq
<= 4) &&
1620 (cpupid_pid_unset(last_cpupid
) || cpupid_match_pid(p
, last_cpupid
)))
1624 * Multi-stage node selection is used in conjunction with a periodic
1625 * migration fault to build a temporal task<->page relation. By using
1626 * a two-stage filter we remove short/unlikely relations.
1628 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1629 * a task's usage of a particular page (n_p) per total usage of this
1630 * page (n_t) (in a given time-span) to a probability.
1632 * Our periodic faults will sample this probability and getting the
1633 * same result twice in a row, given these samples are fully
1634 * independent, is then given by P(n)^2, provided our sample period
1635 * is sufficiently short compared to the usage pattern.
1637 * This quadric squishes small probabilities, making it less likely we
1638 * act on an unlikely task<->page relation.
1640 if (!cpupid_pid_unset(last_cpupid
) &&
1641 cpupid_to_nid(last_cpupid
) != dst_nid
)
1644 /* Always allow migrate on private faults */
1645 if (cpupid_match_pid(p
, last_cpupid
))
1648 /* A shared fault, but p->numa_group has not been set up yet. */
1653 * Destination node is much more heavily used than the source
1654 * node? Allow migration.
1656 if (group_faults_cpu(ng
, dst_nid
) > group_faults_cpu(ng
, src_nid
) *
1657 ACTIVE_NODE_FRACTION
)
	/*
	 * Distribute memory according to CPU & memory use on each node,
	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
	 *
	 * faults_cpu(dst)   3   faults_cpu(src)
	 * --------------- * - > ---------------
	 * faults_mem(dst)   4   faults_mem(src)
	 */
	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
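
/*
 * Illustrative example of the 3/4 hysteresis: if the destination node sees
 * 30 CPU faults against 10 memory faults for this group while the source
 * sees 10 and 10, then 30 * 10 * 3 = 900 > 10 * 10 * 4 = 400 and the page
 * is migrated; the 3/4 factor keeps roughly balanced nodes from
 * ping-ponging pages back and forth.
 */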
1673 * 'numa_type' describes the node at the moment of load balancing.
1676 /* The node has spare capacity that can be used to run more tasks. */
1679 * The node is fully used and the tasks don't compete for more CPU
1680 * cycles. Nevertheless, some tasks might wait before running.
1684 * The node is overloaded and can't provide expected CPU cycles to all
1690 /* Cached statistics for all CPUs within a node */
1693 unsigned long runnable
;
1695 /* Total compute capacity of CPUs on a node */
1696 unsigned long compute_capacity
;
1697 unsigned int nr_running
;
1698 unsigned int weight
;
1699 enum numa_type node_type
;
1703 static inline bool is_core_idle(int cpu
)
1705 #ifdef CONFIG_SCHED_SMT
1708 for_each_cpu(sibling
, cpu_smt_mask(cpu
)) {
1712 if (!idle_cpu(sibling
))
1720 struct task_numa_env
{
1721 struct task_struct
*p
;
1723 int src_cpu
, src_nid
;
1724 int dst_cpu
, dst_nid
;
1727 struct numa_stats src_stats
, dst_stats
;
1732 struct task_struct
*best_task
;
1737 static unsigned long cpu_load(struct rq
*rq
);
1738 static unsigned long cpu_runnable(struct rq
*rq
);
1741 numa_type
numa_classify(unsigned int imbalance_pct
,
1742 struct numa_stats
*ns
)
1744 if ((ns
->nr_running
> ns
->weight
) &&
1745 (((ns
->compute_capacity
* 100) < (ns
->util
* imbalance_pct
)) ||
1746 ((ns
->compute_capacity
* imbalance_pct
) < (ns
->runnable
* 100))))
1747 return node_overloaded
;
1749 if ((ns
->nr_running
< ns
->weight
) ||
1750 (((ns
->compute_capacity
* 100) > (ns
->util
* imbalance_pct
)) &&
1751 ((ns
->compute_capacity
* imbalance_pct
) > (ns
->runnable
* 100))))
1752 return node_has_spare
;
1754 return node_fully_busy
;
1757 #ifdef CONFIG_SCHED_SMT
1758 /* Forward declarations of select_idle_sibling helpers */
1759 static inline bool test_idle_cores(int cpu
);
1760 static inline int numa_idle_core(int idle_core
, int cpu
)
1762 if (!static_branch_likely(&sched_smt_present
) ||
1763 idle_core
>= 0 || !test_idle_cores(cpu
))
1767 * Prefer cores instead of packing HT siblings
1768 * and triggering future load balancing.
1770 if (is_core_idle(cpu
))
1776 static inline int numa_idle_core(int idle_core
, int cpu
)
1783 * Gather all necessary information to make NUMA balancing placement
1784 * decisions that are compatible with standard load balancer. This
1785 * borrows code and logic from update_sg_lb_stats but sharing a
1786 * common implementation is impractical.
1788 static void update_numa_stats(struct task_numa_env
*env
,
1789 struct numa_stats
*ns
, int nid
,
1792 int cpu
, idle_core
= -1;
1794 memset(ns
, 0, sizeof(*ns
));
1798 for_each_cpu(cpu
, cpumask_of_node(nid
)) {
1799 struct rq
*rq
= cpu_rq(cpu
);
1801 ns
->load
+= cpu_load(rq
);
1802 ns
->runnable
+= cpu_runnable(rq
);
1803 ns
->util
+= cpu_util_cfs(cpu
);
1804 ns
->nr_running
+= rq
->cfs
.h_nr_running
;
1805 ns
->compute_capacity
+= capacity_of(cpu
);
1807 if (find_idle
&& idle_core
< 0 && !rq
->nr_running
&& idle_cpu(cpu
)) {
1808 if (READ_ONCE(rq
->numa_migrate_on
) ||
1809 !cpumask_test_cpu(cpu
, env
->p
->cpus_ptr
))
1812 if (ns
->idle_cpu
== -1)
1815 idle_core
= numa_idle_core(idle_core
, cpu
);
1820 ns
->weight
= cpumask_weight(cpumask_of_node(nid
));
1822 ns
->node_type
= numa_classify(env
->imbalance_pct
, ns
);
1825 ns
->idle_cpu
= idle_core
;
1828 static void task_numa_assign(struct task_numa_env
*env
,
1829 struct task_struct
*p
, long imp
)
1831 struct rq
*rq
= cpu_rq(env
->dst_cpu
);
1833 /* Check if run-queue part of active NUMA balance. */
1834 if (env
->best_cpu
!= env
->dst_cpu
&& xchg(&rq
->numa_migrate_on
, 1)) {
1836 int start
= env
->dst_cpu
;
1838 /* Find alternative idle CPU. */
1839 for_each_cpu_wrap(cpu
, cpumask_of_node(env
->dst_nid
), start
+ 1) {
1840 if (cpu
== env
->best_cpu
|| !idle_cpu(cpu
) ||
1841 !cpumask_test_cpu(cpu
, env
->p
->cpus_ptr
)) {
1846 rq
= cpu_rq(env
->dst_cpu
);
1847 if (!xchg(&rq
->numa_migrate_on
, 1))
1851 /* Failed to find an alternative idle CPU */
1857 * Clear previous best_cpu/rq numa-migrate flag, since task now
1858 * found a better CPU to move/swap.
1860 if (env
->best_cpu
!= -1 && env
->best_cpu
!= env
->dst_cpu
) {
1861 rq
= cpu_rq(env
->best_cpu
);
1862 WRITE_ONCE(rq
->numa_migrate_on
, 0);
1866 put_task_struct(env
->best_task
);
1871 env
->best_imp
= imp
;
1872 env
->best_cpu
= env
->dst_cpu
;
1875 static bool load_too_imbalanced(long src_load
, long dst_load
,
1876 struct task_numa_env
*env
)
1879 long orig_src_load
, orig_dst_load
;
1880 long src_capacity
, dst_capacity
;
1883 * The load is corrected for the CPU capacity available on each node.
1886 * ------------ vs ---------
1887 * src_capacity dst_capacity
1889 src_capacity
= env
->src_stats
.compute_capacity
;
1890 dst_capacity
= env
->dst_stats
.compute_capacity
;
1892 imb
= abs(dst_load
* src_capacity
- src_load
* dst_capacity
);
1894 orig_src_load
= env
->src_stats
.load
;
1895 orig_dst_load
= env
->dst_stats
.load
;
1897 old_imb
= abs(orig_dst_load
* src_capacity
- orig_src_load
* dst_capacity
);
1899 /* Would this change make things worse? */
1900 return (imb
> old_imb
);
1904 * Maximum NUMA importance can be 1998 (2*999);
1905 * SMALLIMP @ 30 would be close to 1998/64.
1906 * Used to deter task migration.
1911 * This checks if the overall compute and NUMA accesses of the system would
1912 * be improved if the source tasks was migrated to the target dst_cpu taking
1913 * into account that it might be best if task running on the dst_cpu should
1914 * be exchanged with the source task
1916 static bool task_numa_compare(struct task_numa_env
*env
,
1917 long taskimp
, long groupimp
, bool maymove
)
1919 struct numa_group
*cur_ng
, *p_ng
= deref_curr_numa_group(env
->p
);
1920 struct rq
*dst_rq
= cpu_rq(env
->dst_cpu
);
1921 long imp
= p_ng
? groupimp
: taskimp
;
1922 struct task_struct
*cur
;
1923 long src_load
, dst_load
;
1924 int dist
= env
->dist
;
1927 bool stopsearch
= false;
1929 if (READ_ONCE(dst_rq
->numa_migrate_on
))
1933 cur
= rcu_dereference(dst_rq
->curr
);
1934 if (cur
&& ((cur
->flags
& PF_EXITING
) || is_idle_task(cur
)))
1938 * Because we have preemption enabled we can get migrated around and
1939 * end try selecting ourselves (current == env->p) as a swap candidate.
1941 if (cur
== env
->p
) {
1947 if (maymove
&& moveimp
>= env
->best_imp
)
1953 /* Skip this swap candidate if cannot move to the source cpu. */
1954 if (!cpumask_test_cpu(env
->src_cpu
, cur
->cpus_ptr
))
1958 * Skip this swap candidate if it is not moving to its preferred
1959 * node and the best task is.
1961 if (env
->best_task
&&
1962 env
->best_task
->numa_preferred_nid
== env
->src_nid
&&
1963 cur
->numa_preferred_nid
!= env
->src_nid
) {
1968 * "imp" is the fault differential for the source task between the
1969 * source and destination node. Calculate the total differential for
1970 * the source task and potential destination task. The more negative
1971 * the value is, the more remote accesses that would be expected to
1972 * be incurred if the tasks were swapped.
1974 * If dst and source tasks are in the same NUMA group, or not
1975 * in any group then look only at task weights.
1977 cur_ng
= rcu_dereference(cur
->numa_group
);
1978 if (cur_ng
== p_ng
) {
1980 * Do not swap within a group or between tasks that have
1981 * no group if there is spare capacity. Swapping does
1982 * not address the load imbalance and helps one task at
1983 * the cost of punishing another.
1985 if (env
->dst_stats
.node_type
== node_has_spare
)
1988 imp
= taskimp
+ task_weight(cur
, env
->src_nid
, dist
) -
1989 task_weight(cur
, env
->dst_nid
, dist
);
1991 * Add some hysteresis to prevent swapping the
1992 * tasks within a group over tiny differences.
1998 * Compare the group weights. If a task is all by itself
1999 * (not part of a group), use the task weight instead.
2002 imp
+= group_weight(cur
, env
->src_nid
, dist
) -
2003 group_weight(cur
, env
->dst_nid
, dist
);
2005 imp
+= task_weight(cur
, env
->src_nid
, dist
) -
2006 task_weight(cur
, env
->dst_nid
, dist
);
2009 /* Discourage picking a task already on its preferred node */
2010 if (cur
->numa_preferred_nid
== env
->dst_nid
)
2014 * Encourage picking a task that moves to its preferred node.
2015 * This potentially makes imp larger than it's maximum of
2016 * 1998 (see SMALLIMP and task_weight for why) but in this
2017 * case, it does not matter.
2019 if (cur
->numa_preferred_nid
== env
->src_nid
)
2022 if (maymove
&& moveimp
> imp
&& moveimp
> env
->best_imp
) {
2029 * Prefer swapping with a task moving to its preferred node over a
2032 if (env
->best_task
&& cur
->numa_preferred_nid
== env
->src_nid
&&
2033 env
->best_task
->numa_preferred_nid
!= env
->src_nid
) {
2038 * If the NUMA importance is less than SMALLIMP,
2039 * task migration might only result in ping pong
2040 * of tasks and also hurt performance due to cache
2043 if (imp
< SMALLIMP
|| imp
<= env
->best_imp
+ SMALLIMP
/ 2)
2047 * In the overloaded case, try and keep the load balanced.
2049 load
= task_h_load(env
->p
) - task_h_load(cur
);
2053 dst_load
= env
->dst_stats
.load
+ load
;
2054 src_load
= env
->src_stats
.load
- load
;
2056 if (load_too_imbalanced(src_load
, dst_load
, env
))
2060 /* Evaluate an idle CPU for a task numa move. */
2062 int cpu
= env
->dst_stats
.idle_cpu
;
2064 /* Nothing cached so current CPU went idle since the search. */
2069 * If the CPU is no longer truly idle and the previous best CPU
2070 * is, keep using it.
2072 if (!idle_cpu(cpu
) && env
->best_cpu
>= 0 &&
2073 idle_cpu(env
->best_cpu
)) {
2074 cpu
= env
->best_cpu
;
2080 task_numa_assign(env
, cur
, imp
);
2083 * If a move to idle is allowed because there is capacity or load
2084 * balance improves then stop the search. While a better swap
2085 * candidate may exist, a search is not free.
2087 if (maymove
&& !cur
&& env
->best_cpu
>= 0 && idle_cpu(env
->best_cpu
))
2091 * If a swap candidate must be identified and the current best task
2092 * moves its preferred node then stop the search.
2094 if (!maymove
&& env
->best_task
&&
2095 env
->best_task
->numa_preferred_nid
== env
->src_nid
) {
2104 static void task_numa_find_cpu(struct task_numa_env
*env
,
2105 long taskimp
, long groupimp
)
2107 bool maymove
= false;
2111 * If dst node has spare capacity, then check if there is an
2112 * imbalance that would be overruled by the load balancer.
2114 if (env
->dst_stats
.node_type
== node_has_spare
) {
2115 unsigned int imbalance
;
2116 int src_running
, dst_running
;
2119 * Would movement cause an imbalance? Note that if src has
2120 * more running tasks that the imbalance is ignored as the
2121 * move improves the imbalance from the perspective of the
2122 * CPU load balancer.
2124 src_running
= env
->src_stats
.nr_running
- 1;
2125 dst_running
= env
->dst_stats
.nr_running
+ 1;
2126 imbalance
= max(0, dst_running
- src_running
);
2127 imbalance
= adjust_numa_imbalance(imbalance
, dst_running
,
2130 /* Use idle CPU if there is no imbalance */
2133 if (env
->dst_stats
.idle_cpu
>= 0) {
2134 env
->dst_cpu
= env
->dst_stats
.idle_cpu
;
2135 task_numa_assign(env
, NULL
, 0);
2140 long src_load
, dst_load
, load
;
2142 * If the improvement from just moving env->p direction is better
2143 * than swapping tasks around, check if a move is possible.
2145 load
= task_h_load(env
->p
);
2146 dst_load
= env
->dst_stats
.load
+ load
;
2147 src_load
= env
->src_stats
.load
- load
;
2148 maymove
= !load_too_imbalanced(src_load
, dst_load
, env
);
2151 for_each_cpu(cpu
, cpumask_of_node(env
->dst_nid
)) {
2152 /* Skip this CPU if the source task cannot migrate */
2153 if (!cpumask_test_cpu(cpu
, env
->p
->cpus_ptr
))
2157 if (task_numa_compare(env
, taskimp
, groupimp
, maymove
))
static int task_numa_migrate(struct task_struct *p)
{
	struct task_numa_env env = {
		.p = p,

		.src_cpu = task_cpu(p),
		.src_nid = task_node(p),

		.imbalance_pct = 112,

		.best_task = NULL,
		.best_imp = 0,
		.best_cpu = -1,
	};
	unsigned long taskweight, groupweight;
	struct sched_domain *sd;
	long taskimp, groupimp;
	struct numa_group *ng;
	struct rq *best_rq;
	int nid, ret, dist;

	/*
	 * Pick the lowest SD_NUMA domain, as that would have the smallest
	 * imbalance and would be the first to start moving tasks about.
	 *
	 * And we want to avoid any moving of tasks about, as that would create
	 * random movement of tasks -- counter the numa conditions we're trying
	 * to satisfy here.
	 */
	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
	if (sd) {
		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
		env.imb_numa_nr = sd->imb_numa_nr;
	}
	rcu_read_unlock();

	/*
	 * Cpusets can break the scheduler domain tree into smaller
	 * balance domains, some of which do not cross NUMA boundaries.
	 * Tasks that are "trapped" in such domains cannot be migrated
	 * elsewhere, so there is no point in (re)trying.
	 */
	if (unlikely(!sd)) {
		sched_setnuma(p, task_node(p));
		return -EINVAL;
	}

	env.dst_nid = p->numa_preferred_nid;
	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
	taskweight = task_weight(p, env.src_nid, dist);
	groupweight = group_weight(p, env.src_nid, dist);
	update_numa_stats(&env, &env.src_stats, env.src_nid, false);
	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
	update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);

	/* Try to find a spot on the preferred nid. */
	task_numa_find_cpu(&env, taskimp, groupimp);

	/*
	 * Look at other nodes in these cases:
	 * - there is no space available on the preferred_nid
	 * - the task is part of a numa_group that is interleaved across
	 *   multiple NUMA nodes; in order to better consolidate the group,
	 *   we need to check other locations.
	 */
	ng = deref_curr_numa_group(p);
	if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
		for_each_node_state(nid, N_CPU) {
			if (nid == env.src_nid || nid == p->numa_preferred_nid)
				continue;

			dist = node_distance(env.src_nid, env.dst_nid);
			if (sched_numa_topology_type == NUMA_BACKPLANE &&
						dist != env.dist) {
				taskweight = task_weight(p, env.src_nid, dist);
				groupweight = group_weight(p, env.src_nid, dist);
			}

			/* Only consider nodes where both task and groups benefit */
			taskimp = task_weight(p, nid, dist) - taskweight;
			groupimp = group_weight(p, nid, dist) - groupweight;
			if (taskimp < 0 && groupimp < 0)
				continue;

			env.dist = dist;
			env.dst_nid = nid;
			update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
			task_numa_find_cpu(&env, taskimp, groupimp);
		}
	}

	/*
	 * If the task is part of a workload that spans multiple NUMA nodes,
	 * and is migrating into one of the workload's active nodes, remember
	 * this node as the task's preferred numa node, so the workload can
	 * settle down.
	 * A task that migrated to a second choice node will be better off
	 * trying for a better one later. Do not set the preferred node here.
	 */
	if (ng) {
		if (env.best_cpu == -1)
			nid = env.src_nid;
		else
			nid = cpu_to_node(env.best_cpu);

		if (nid != p->numa_preferred_nid)
			sched_setnuma(p, nid);
	}

	/* No better CPU than the current one was found. */
	if (env.best_cpu == -1) {
		trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
		return -EAGAIN;
	}

	best_rq = cpu_rq(env.best_cpu);
	if (env.best_task == NULL) {
		ret = migrate_task_to(p, env.best_cpu);
		WRITE_ONCE(best_rq->numa_migrate_on, 0);
		if (ret != 0)
			trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
		return ret;
	}

	ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
	WRITE_ONCE(best_rq->numa_migrate_on, 0);

	if (ret != 0)
		trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
	put_task_struct(env.best_task);
	return ret;
}
/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
	unsigned long interval = HZ;

	/* This task has no NUMA fault statistics yet */
	if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
		return;

	/* Periodically retry migrating the task to the preferred node */
	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
	p->numa_migrate_retry = jiffies + interval;

	/* Success if task is already running on preferred CPU */
	if (task_node(p) == p->numa_preferred_nid)
		return;

	/* Otherwise, try migrate to a CPU on the preferred node */
	task_numa_migrate(p);
}
/*
 * Find out how many nodes the workload is actively running on. Do this by
 * tracking the nodes from which NUMA hinting faults are triggered. This can
 * be different from the set of nodes where the workload's memory is currently
 * located.
 */
static void numa_group_count_active_nodes(struct numa_group *numa_group)
{
	unsigned long faults, max_faults = 0;
	int nid, active_nodes = 0;

	for_each_node_state(nid, N_CPU) {
		faults = group_faults_cpu(numa_group, nid);
		if (faults > max_faults)
			max_faults = faults;
	}

	for_each_node_state(nid, N_CPU) {
		faults = group_faults_cpu(numa_group, nid);
		if (faults * ACTIVE_NODE_FRACTION > max_faults)
			active_nodes++;
	}

	numa_group->max_faults_cpu = max_faults;
	numa_group->active_nodes = active_nodes;
}
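
/*
 * Example with made-up numbers: if the busiest node in the group records
 * max_faults = 900 CPU-side hinting faults, a node is counted as active
 * only when its own faults exceed max_faults / ACTIVE_NODE_FRACTION;
 * nodes seeing only a trickle of faults do not grow active_nodes.
 */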
/*
 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
 * increments. The more local the fault statistics are, the higher the scan
 * period will be for the next scan window. If local/(local+remote) ratio is
 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
 * the scan period will decrease. Aim for 70% local accesses.
 */
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 7

/*
 * Increase the scan period (slow down scanning) if the majority of
 * our memory is already on our local node, or if the majority of
 * the page accesses are shared with other processes.
 * Otherwise, decrease the scan period.
 */
static void update_task_scan_period(struct task_struct *p,
			unsigned long shared, unsigned long private)
{
	unsigned int period_slot;
	int lr_ratio, ps_ratio;
	int diff;

	unsigned long remote = p->numa_faults_locality[0];
	unsigned long local = p->numa_faults_locality[1];

	/*
	 * If there were no record hinting faults then either the task is
	 * completely idle or all activity is in areas that are not of interest
	 * to automatic numa balancing. Related to that, if there were failed
	 * migration then it implies we are migrating too quickly or the local
	 * node is overloaded. In either case, scan slower
	 */
	if (local + shared == 0 || p->numa_faults_locality[2]) {
		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period << 1);

		p->mm->numa_next_scan = jiffies +
			msecs_to_jiffies(p->numa_scan_period);

		return;
	}

	/*
	 * Prepare to scale scan period relative to the current period.
	 *	 == NUMA_PERIOD_THRESHOLD scan period stays the same
	 *	 <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
	 */
	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
	lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
	ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);

	if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
		/*
		 * Most memory accesses are local. There is no need to
		 * do fast NUMA scanning, since memory is already local.
		 */
		int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
		if (!slot)
			slot = 1;
		diff = slot * period_slot;
	} else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
		/*
		 * Most memory accesses are shared with other tasks.
		 * There is no point in continuing fast NUMA scanning,
		 * since other tasks may just move the memory elsewhere.
		 */
		int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
		if (!slot)
			slot = 1;
		diff = slot * period_slot;
	} else {
		/*
		 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
		 * yet they are not on the local NUMA node. Speed up
		 * NUMA scanning to get the memory moved over.
		 */
		int ratio = max(lr_ratio, ps_ratio);
		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
	}

	p->numa_scan_period = clamp(p->numa_scan_period + diff,
			task_scan_min(p), task_scan_max(p));
	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
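
/*
 * Worked example with made-up numbers: for a 1000ms scan period,
 * period_slot = DIV_ROUND_UP(1000, 10) = 100. If 9 out of 10 recorded
 * faults were private, ps_ratio = 9 >= NUMA_PERIOD_THRESHOLD, so slot = 2
 * and the period grows by 200ms (scan slower). If instead only 4 out of
 * 10 faults were private and 4 out of 10 were local,
 * diff = -(7 - 4) * 100 = -300ms (scan faster), subject to the
 * task_scan_min()/task_scan_max() clamp above.
 */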
/*
 * Get the fraction of time the task has been running since the last
 * NUMA placement cycle. The scheduler keeps similar statistics, but
 * decays those on a 32ms period, which is orders of magnitude off
 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
 * stats only if the task is so new there are no NUMA statistics yet.
 */
static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
{
	u64 runtime, delta, now;

	/* Use the start of this time slice to avoid calculations. */
	now = p->se.exec_start;
	runtime = p->se.sum_exec_runtime;

	if (p->last_task_numa_placement) {
		delta = runtime - p->last_sum_exec_runtime;
		*period = now - p->last_task_numa_placement;

		/* Avoid time going backwards, prevent potential divide error: */
		if (unlikely((s64)*period < 0))
			*period = 0;
	} else {
		delta = p->se.avg.load_sum;
		*period = LOAD_AVG_MAX;
	}

	p->last_sum_exec_runtime = runtime;
	p->last_task_numa_placement = now;

	return delta;
}
/*
 * Determine the preferred nid for a task in a numa_group. This needs to
 * be done in a way that produces consistent results with group_weight,
 * otherwise workloads might not converge.
 */
static int preferred_group_nid(struct task_struct *p, int nid)
{
	nodemask_t nodes;
	int dist;

	/* Direct connections between all NUMA nodes. */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return nid;

	/*
	 * On a system with glueless mesh NUMA topology, group_weight
	 * scores nodes according to the number of NUMA hinting faults on
	 * both the node itself, and on nearby nodes.
	 */
	if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
		unsigned long score, max_score = 0;
		int node, max_node = nid;

		dist = sched_max_numa_distance;

		for_each_node_state(node, N_CPU) {
			score = group_weight(p, node, dist);
			if (score > max_score) {
				max_score = score;
				max_node = node;
			}
		}
		return max_node;
	}

	/*
	 * Finding the preferred nid in a system with NUMA backplane
	 * interconnect topology is more involved. The goal is to locate
	 * tasks from numa_groups near each other in the system, and
	 * untangle workloads from different sides of the system. This requires
	 * searching down the hierarchy of node groups, recursively searching
	 * inside the highest scoring group of nodes. The nodemask tricks
	 * keep the complexity of the search down.
	 */
	nodes = node_states[N_CPU];
	for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
		unsigned long max_faults = 0;
		nodemask_t max_group = NODE_MASK_NONE;
		int a, b;

		/* Are there nodes at this distance from each other? */
		if (!find_numa_distance(dist))
			continue;

		for_each_node_mask(a, nodes) {
			unsigned long faults = 0;
			nodemask_t this_group;
			nodes_clear(this_group);

			/* Sum group's NUMA faults; includes a==b case. */
			for_each_node_mask(b, nodes) {
				if (node_distance(a, b) < dist) {
					faults += group_faults(p, b);
					node_set(b, this_group);
					node_clear(b, nodes);
				}
			}

			/* Remember the top group. */
			if (faults > max_faults) {
				max_faults = faults;
				max_group = this_group;
				/*
				 * subtle: at the smallest distance there is
				 * just one node left in each "group", the
				 * winner is the preferred nid.
				 */
				nid = a;
			}
		}
		/* Next round, evaluate the nodes within max_group. */
		if (!max_faults)
			break;
		nodes = max_group;
	}
	return nid;
}
static void task_numa_placement(struct task_struct *p)
{
	int seq, nid, max_nid = NUMA_NO_NODE;
	unsigned long max_faults = 0;
	unsigned long fault_types[2] = { 0, 0 };
	unsigned long total_faults;
	u64 runtime, period;
	spinlock_t *group_lock = NULL;
	struct numa_group *ng;

	/*
	 * The p->mm->numa_scan_seq field gets updated without
	 * exclusive access. Use READ_ONCE() here to ensure
	 * that the field is read in a single access:
	 */
	seq = READ_ONCE(p->mm->numa_scan_seq);
	if (p->numa_scan_seq == seq)
		return;
	p->numa_scan_seq = seq;
	p->numa_scan_period_max = task_scan_max(p);

	total_faults = p->numa_faults_locality[0] +
		       p->numa_faults_locality[1];
	runtime = numa_get_avg_runtime(p, &period);

	/* If the task is part of a group prevent parallel updates to group stats */
	ng = deref_curr_numa_group(p);
	if (ng) {
		group_lock = &ng->lock;
		spin_lock_irq(group_lock);
	}

	/* Find the node with the highest number of faults */
	for_each_online_node(nid) {
		/* Keep track of the offsets in numa_faults array */
		int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
		unsigned long faults = 0, group_faults = 0;
		int priv;

		for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
			long diff, f_diff, f_weight;

			mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
			membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
			cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
			cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);

			/* Decay existing window, copy faults since last scan */
			diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
			fault_types[priv] += p->numa_faults[membuf_idx];
			p->numa_faults[membuf_idx] = 0;

			/*
			 * Normalize the faults_from, so all tasks in a group
			 * count according to CPU use, instead of by the raw
			 * number of faults. Tasks with little runtime have
			 * little over-all impact on throughput, and thus their
			 * faults are less important.
			 */
			f_weight = div64_u64(runtime << 16, period + 1);
			f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
				   (total_faults + 1);
			f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
			p->numa_faults[cpubuf_idx] = 0;

			p->numa_faults[mem_idx] += diff;
			p->numa_faults[cpu_idx] += f_diff;
			faults += p->numa_faults[mem_idx];
			p->total_numa_faults += diff;
			if (ng) {
				/*
				 * safe because we can only change our own group
				 *
				 * mem_idx represents the offset for a given
				 * nid and priv in a specific region because it
				 * is at the beginning of the numa_faults array.
				 */
				ng->faults[mem_idx] += diff;
				ng->faults[cpu_idx] += f_diff;
				ng->total_faults += diff;
				group_faults += ng->faults[mem_idx];
			}
		}

		if (!ng) {
			if (faults > max_faults) {
				max_faults = faults;
				max_nid = nid;
			}
		} else if (group_faults > max_faults) {
			max_faults = group_faults;
			max_nid = nid;
		}
	}

	/* Cannot migrate task to CPU-less node */
	if (max_nid != NUMA_NO_NODE && !node_state(max_nid, N_CPU)) {
		int near_nid = max_nid;
		int distance, near_distance = INT_MAX;

		for_each_node_state(nid, N_CPU) {
			distance = node_distance(max_nid, nid);
			if (distance < near_distance) {
				near_nid = nid;
				near_distance = distance;
			}
		}
		max_nid = near_nid;
	}

	if (ng) {
		numa_group_count_active_nodes(ng);
		spin_unlock_irq(group_lock);
		max_nid = preferred_group_nid(p, max_nid);
	}

	if (max_faults) {
		/* Set the new preferred node */
		if (max_nid != p->numa_preferred_nid)
			sched_setnuma(p, max_nid);
	}

	update_task_scan_period(p, fault_types[0], fault_types[1]);
}
static inline int get_numa_group(struct numa_group *grp)
{
	return refcount_inc_not_zero(&grp->refcount);
}

static inline void put_numa_group(struct numa_group *grp)
{
	if (refcount_dec_and_test(&grp->refcount))
		kfree_rcu(grp, rcu);
}
static void task_numa_group(struct task_struct *p, int cpupid, int flags,
			int *priv)
{
	struct numa_group *grp, *my_grp;
	struct task_struct *tsk;
	bool join = false;
	int cpu = cpupid_to_cpu(cpupid);
	int i;

	if (unlikely(!deref_curr_numa_group(p))) {
		unsigned int size = sizeof(struct numa_group) +
				    NR_NUMA_HINT_FAULT_STATS *
				    nr_node_ids * sizeof(unsigned long);

		grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
		if (!grp)
			return;

		refcount_set(&grp->refcount, 1);
		grp->active_nodes = 1;
		grp->max_faults_cpu = 0;
		spin_lock_init(&grp->lock);

		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
			grp->faults[i] = p->numa_faults[i];

		grp->total_faults = p->total_numa_faults;

		grp->nr_tasks++;
		rcu_assign_pointer(p->numa_group, grp);
	}

	rcu_read_lock();
	tsk = READ_ONCE(cpu_rq(cpu)->curr);

	if (!cpupid_match_pid(tsk, cpupid))
		goto no_join;

	grp = rcu_dereference(tsk->numa_group);
	if (!grp)
		goto no_join;

	my_grp = deref_curr_numa_group(p);
	if (grp == my_grp)
		goto no_join;

	/*
	 * Only join the other group if its bigger; if we're the bigger group,
	 * the other task will join us.
	 */
	if (my_grp->nr_tasks > grp->nr_tasks)
		goto no_join;

	/*
	 * Tie-break on the grp address.
	 */
	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
		goto no_join;

	/* Always join threads in the same process. */
	if (tsk->mm == current->mm)
		join = true;

	/* Simple filter to avoid false positives due to PID collisions */
	if (flags & TNF_SHARED)
		join = true;

	/* Update priv based on whether false sharing was detected */
	*priv = !join;

	if (join && !get_numa_group(grp))
		goto no_join;

	rcu_read_unlock();

	if (!join)
		return;

	WARN_ON_ONCE(irqs_disabled());
	double_lock_irq(&my_grp->lock, &grp->lock);

	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
		my_grp->faults[i] -= p->numa_faults[i];
		grp->faults[i] += p->numa_faults[i];
	}
	my_grp->total_faults -= p->total_numa_faults;
	grp->total_faults += p->total_numa_faults;

	my_grp->nr_tasks--;
	grp->nr_tasks++;

	spin_unlock(&my_grp->lock);
	spin_unlock_irq(&grp->lock);

	rcu_assign_pointer(p->numa_group, grp);

	put_numa_group(my_grp);
	return;

no_join:
	rcu_read_unlock();
	return;
}
/*
 * Get rid of NUMA statistics associated with a task (either current or dead).
 * If @final is set, the task is dead and has reached refcount zero, so we can
 * safely free all relevant data structures. Otherwise, there might be
 * concurrent reads from places like load balancing and procfs, and we should
 * reset the data back to default state without freeing ->numa_faults.
 */
void task_numa_free(struct task_struct *p, bool final)
{
	/* safe: p either is current or is being freed by current */
	struct numa_group *grp = rcu_dereference_raw(p->numa_group);
	unsigned long *numa_faults = p->numa_faults;
	unsigned long flags;
	int i;

	if (!numa_faults)
		return;

	if (grp) {
		spin_lock_irqsave(&grp->lock, flags);
		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
			grp->faults[i] -= p->numa_faults[i];
		grp->total_faults -= p->total_numa_faults;

		grp->nr_tasks--;
		spin_unlock_irqrestore(&grp->lock, flags);
		RCU_INIT_POINTER(p->numa_group, NULL);
		put_numa_group(grp);
	}

	if (final) {
		p->numa_faults = NULL;
		kfree(numa_faults);
	} else {
		p->total_numa_faults = 0;
		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
			numa_faults[i] = 0;
	}
}
/*
 * Got a PROT_NONE fault for a page on @node.
 */
void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
{
	struct task_struct *p = current;
	bool migrated = flags & TNF_MIGRATED;
	int cpu_node = task_node(current);
	int local = !!(flags & TNF_FAULT_LOCAL);
	struct numa_group *ng;
	int priv;

	if (!static_branch_likely(&sched_numa_balancing))
		return;

	/* for example, ksmd faulting in a user's mm */
	if (!p->mm)
		return;

	/*
	 * NUMA faults statistics are unnecessary for the slow memory
	 * node for memory tiering mode.
	 */
	if (!node_is_toptier(mem_node) &&
	    (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ||
	     !cpupid_valid(last_cpupid)))
		return;

	/* Allocate buffer to track faults on a per-node basis */
	if (unlikely(!p->numa_faults)) {
		int size = sizeof(*p->numa_faults) *
			   NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;

		p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
		if (!p->numa_faults)
			return;

		p->total_numa_faults = 0;
		memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
	}

	/*
	 * First accesses are treated as private, otherwise consider accesses
	 * to be private if the accessing pid has not changed
	 */
	if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
		priv = 1;
	} else {
		priv = cpupid_match_pid(p, last_cpupid);
		if (!priv && !(flags & TNF_NO_GROUP))
			task_numa_group(p, last_cpupid, flags, &priv);
	}

	/*
	 * If a workload spans multiple NUMA nodes, a shared fault that
	 * occurs wholly within the set of nodes that the workload is
	 * actively using should be counted as local. This allows the
	 * scan rate to slow down when a workload has settled down.
	 */
	ng = deref_curr_numa_group(p);
	if (!priv && !local && ng && ng->active_nodes > 1 &&
				numa_is_active_node(cpu_node, ng) &&
				numa_is_active_node(mem_node, ng))
		local = 1;

	/*
	 * Retry to migrate task to preferred node periodically, in case it
	 * previously failed, or the scheduler moved us.
	 */
	if (time_after(jiffies, p->numa_migrate_retry)) {
		task_numa_placement(p);
		numa_migrate_preferred(p);
	}

	if (migrated)
		p->numa_pages_migrated += pages;
	if (flags & TNF_MIGRATE_FAIL)
		p->numa_faults_locality[2] += pages;

	p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
	p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
	p->numa_faults_locality[local] += pages;
}
static void reset_ptenuma_scan(struct task_struct *p)
{
	/*
	 * We only did a read acquisition of the mmap sem, so
	 * p->mm->numa_scan_seq is written to without exclusive access
	 * and the update is not guaranteed to be atomic. That's not
	 * much of an issue though, since this is just used for
	 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
	 * expensive, to avoid any form of compiler optimizations:
	 */
	WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
	p->mm->numa_scan_offset = 0;
}
static bool vma_is_accessed(struct vm_area_struct *vma)
{
	unsigned long pids;
	/*
	 * Allow unconditional access first two times, so that all the (pages)
	 * of VMAs get prot_none fault introduced irrespective of accesses.
	 * This is also done to avoid any side effect of task scanning
	 * amplifying the unfairness of disjoint set of VMAs' access.
	 */
	if (READ_ONCE(current->mm->numa_scan_seq) < 2)
		return true;

	pids = vma->numab_state->access_pids[0] | vma->numab_state->access_pids[1];
	return test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids);
}

#define VMA_PID_RESET_PERIOD (4 * sysctl_numa_balancing_scan_delay)
/*
 * The expensive part of numa migration is done from task_work context.
 * Triggered from task_tick_numa().
 */
static void task_numa_work(struct callback_head *work)
{
	unsigned long migrate, next_scan, now = jiffies;
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	u64 runtime = p->se.sum_exec_runtime;
	struct vm_area_struct *vma;
	unsigned long start, end;
	unsigned long nr_pte_updates = 0;
	long pages, virtpages;
	struct vma_iterator vmi;

	SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));

	work->next = work;
	/*
	 * Who cares about NUMA placement when they're dying.
	 *
	 * NOTE: make sure not to dereference p->mm before this check,
	 * exit_task_work() happens _after_ exit_mm() so we could be called
	 * without p->mm even though we still had it when we enqueued this
	 * work.
	 */
	if (p->flags & PF_EXITING)
		return;

	if (!mm->numa_next_scan) {
		mm->numa_next_scan = now +
			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
	}

	/*
	 * Enforce maximal scan/migration frequency..
	 */
	migrate = mm->numa_next_scan;
	if (time_before(now, migrate))
		return;

	if (p->numa_scan_period == 0) {
		p->numa_scan_period_max = task_scan_max(p);
		p->numa_scan_period = task_scan_start(p);
	}

	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
	if (!try_cmpxchg(&mm->numa_next_scan, &migrate, next_scan))
		return;

	/*
	 * Delay this task enough that another task of this mm will likely win
	 * the next time around.
	 */
	p->node_stamp += 2 * TICK_NSEC;

	start = mm->numa_scan_offset;
	pages = sysctl_numa_balancing_scan_size;
	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
	virtpages = pages * 8;	   /* Scan up to this much virtual space */
	if (!pages)
		return;


	if (!mmap_read_trylock(mm))
		return;
	vma_iter_init(&vmi, mm, start);
	vma = vma_next(&vmi);
	if (!vma) {
		reset_ptenuma_scan(p);
		start = 0;
		vma_iter_set(&vmi, start);
		vma = vma_next(&vmi);
	}

	do {
		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
			continue;
		}

		/*
		 * Shared library pages mapped by multiple processes are not
		 * migrated as it is expected they are cache replicated. Avoid
		 * hinting faults in read-only file-backed mappings or the vdso
		 * as migrating the pages will be of marginal benefit.
		 */
		if (!vma->vm_mm ||
		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
			continue;

		/*
		 * Skip inaccessible VMAs to avoid any confusion between
		 * PROT_NONE and NUMA hinting ptes
		 */
		if (!vma_is_accessible(vma))
			continue;

		/* Initialise new per-VMA NUMAB state. */
		if (!vma->numab_state) {
			vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
				GFP_KERNEL);
			if (!vma->numab_state)
				continue;

			vma->numab_state->next_scan = now +
				msecs_to_jiffies(sysctl_numa_balancing_scan_delay);

			/* Reset happens after 4 times scan delay of scan start */
			vma->numab_state->next_pid_reset = vma->numab_state->next_scan +
				msecs_to_jiffies(VMA_PID_RESET_PERIOD);
		}

		/*
		 * Scanning the VMA's of short lived tasks add more overhead. So
		 * delay the scan for new VMAs.
		 */
		if (mm->numa_scan_seq && time_before(jiffies,
						vma->numab_state->next_scan))
			continue;

		/* Do not scan the VMA if task has not accessed */
		if (!vma_is_accessed(vma))
			continue;

		/*
		 * RESET access PIDs regularly for old VMAs. Resetting after checking
		 * vma for recent access to avoid clearing PID info before access..
		 */
		if (mm->numa_scan_seq &&
				time_after(jiffies, vma->numab_state->next_pid_reset)) {
			vma->numab_state->next_pid_reset = vma->numab_state->next_pid_reset +
				msecs_to_jiffies(VMA_PID_RESET_PERIOD);
			vma->numab_state->access_pids[0] = READ_ONCE(vma->numab_state->access_pids[1]);
			vma->numab_state->access_pids[1] = 0;
		}

		do {
			start = max(start, vma->vm_start);
			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
			end = min(end, vma->vm_end);
			nr_pte_updates = change_prot_numa(vma, start, end);

			/*
			 * Try to scan sysctl_numa_balancing_size worth of
			 * hpages that have at least one present PTE that
			 * is not already pte-numa. If the VMA contains
			 * areas that are unused or already full of prot_numa
			 * PTEs, scan up to virtpages, to skip through those
			 * areas faster.
			 */
			if (nr_pte_updates)
				pages -= (end - start) >> PAGE_SHIFT;
			virtpages -= (end - start) >> PAGE_SHIFT;

			start = end;
			if (pages <= 0 || virtpages <= 0)
				goto out;

			cond_resched();
		} while (end != vma->vm_end);
	} for_each_vma(vmi, vma);

out:
	/*
	 * It is possible to reach the end of the VMA list but the last few
	 * VMAs are not guaranteed to the vma_migratable. If they are not, we
	 * would find the !migratable VMA on the next scan but not reset the
	 * scanner to the start so check it now.
	 */
	if (vma)
		mm->numa_scan_offset = start;
	else
		reset_ptenuma_scan(p);
	mmap_read_unlock(mm);

	/*
	 * Make sure tasks use at least 32x as much time to run other code
	 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
	 * Usually update_task_scan_period slows down scanning enough; on an
	 * overloaded system we need to limit overhead on a per task basis.
	 */
	if (unlikely(p->se.sum_exec_runtime != runtime)) {
		u64 diff = p->se.sum_exec_runtime - runtime;
		p->node_stamp += 32 * diff;
	}
}
void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
	int mm_users = 0;
	struct mm_struct *mm = p->mm;

	if (mm) {
		mm_users = atomic_read(&mm->mm_users);
		if (mm_users == 1) {
			mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
			mm->numa_scan_seq = 0;
		}
	}
	p->node_stamp			= 0;
	p->numa_scan_seq		= mm ? mm->numa_scan_seq : 0;
	p->numa_scan_period		= sysctl_numa_balancing_scan_delay;
	p->numa_migrate_retry		= 0;
	/* Protect against double add, see task_tick_numa and task_numa_work */
	p->numa_work.next		= &p->numa_work;
	p->numa_faults			= NULL;
	p->numa_pages_migrated		= 0;
	p->total_numa_faults		= 0;
	RCU_INIT_POINTER(p->numa_group, NULL);
	p->last_task_numa_placement	= 0;
	p->last_sum_exec_runtime	= 0;

	init_task_work(&p->numa_work, task_numa_work);

	/* New address space, reset the preferred nid */
	if (!(clone_flags & CLONE_VM)) {
		p->numa_preferred_nid = NUMA_NO_NODE;
		return;
	}

	/*
	 * New thread, keep existing numa_preferred_nid which should be copied
	 * already by arch_dup_task_struct but stagger when scans start.
	 */
	if (mm) {
		unsigned int delay;

		delay = min_t(unsigned int, task_scan_max(current),
			current->numa_scan_period * mm_users * NSEC_PER_MSEC);
		delay += 2 * TICK_NSEC;
		p->node_stamp = delay;
	}
}
/*
 * Drive the periodic memory faults..
 */
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
	struct callback_head *work = &curr->numa_work;
	u64 period, now;

	/*
	 * We don't care about NUMA placement if we don't have memory.
	 */
	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
		return;

	/*
	 * Using runtime rather than walltime has the dual advantage that
	 * we (mostly) drive the selection from busy threads and that the
	 * task needs to have done some actual work before we bother with
	 * NUMA placement.
	 */
	now = curr->se.sum_exec_runtime;
	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;

	if (now > curr->node_stamp + period) {
		if (!curr->node_stamp)
			curr->numa_scan_period = task_scan_start(curr);
		curr->node_stamp += period;

		if (!time_before(jiffies, curr->mm->numa_next_scan))
			task_work_add(curr, work, TWA_RESUME);
	}
}
static void update_scan_period(struct task_struct *p, int new_cpu)
{
	int src_nid = cpu_to_node(task_cpu(p));
	int dst_nid = cpu_to_node(new_cpu);

	if (!static_branch_likely(&sched_numa_balancing))
		return;

	if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
		return;

	if (src_nid == dst_nid)
		return;

	/*
	 * Allow resets if faults have been trapped before one scan
	 * has completed. This is most likely due to a new task that
	 * is pulled cross-node due to wakeups or load balancing.
	 */
	if (p->numa_scan_seq) {
		/*
		 * Avoid scan adjustments if moving to the preferred
		 * node or if the task was not previously running on
		 * the preferred node.
		 */
		if (dst_nid == p->numa_preferred_nid ||
		    (p->numa_preferred_nid != NUMA_NO_NODE &&
			src_nid != p->numa_preferred_nid))
			return;
	}

	p->numa_scan_period = task_scan_start(p);
}

#else /* !CONFIG_NUMA_BALANCING */

static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}

static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
}

static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
}

static inline void update_scan_period(struct task_struct *p, int new_cpu)
{
}

#endif /* CONFIG_NUMA_BALANCING */
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
#ifdef CONFIG_SMP
	if (entity_is_task(se)) {
		struct rq *rq = rq_of(cfs_rq);

		account_numa_enqueue(rq, task_of(se));
		list_add(&se->group_node, &rq->cfs_tasks);
	}
#endif
	cfs_rq->nr_running++;
	if (se_is_idle(se))
		cfs_rq->idle_nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
#ifdef CONFIG_SMP
	if (entity_is_task(se)) {
		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
		list_del_init(&se->group_node);
	}
#endif
	cfs_rq->nr_running--;
	if (se_is_idle(se))
		cfs_rq->idle_nr_running--;
}
/*
 * Signed add and clamp on underflow.
 *
 * Explicitly do a load-store to ensure the intermediate value never hits
 * memory. This allows lockless observations without ever seeing the negative
 * values.
 */
#define add_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	typeof(_val) val = (_val);				\
	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
								\
	res = var + val;					\
								\
	if (val < 0 && res > var)				\
		res = 0;					\
								\
	WRITE_ONCE(*ptr, res);					\
} while (0)

/*
 * Unsigned subtract and clamp on underflow.
 *
 * Explicitly do a load-store to ensure the intermediate value never hits
 * memory. This allows lockless observations without ever seeing the negative
 * values.
 */
#define sub_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	typeof(*ptr) val = (_val);				\
	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
	res = var - val;					\
	if (res > var)						\
		res = 0;					\
	WRITE_ONCE(*ptr, res);					\
} while (0)

/*
 * Remove and clamp on negative, from a local variable.
 *
 * A variant of sub_positive(), which does not use explicit load-store
 * and is thus optimized for local variable updates.
 */
#define lsub_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	*ptr -= min_t(typeof(*ptr), *ptr, _val);		\
} while (0)
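
/*
 * Example: with *ptr == 3 and _val == 5, sub_positive() stores 0 instead
 * of letting the unsigned subtraction wrap around, so a lockless reader
 * can never observe a transient "negative" (huge) value.
 */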
#ifdef CONFIG_SMP
static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->avg.load_avg += se->avg.load_avg;
	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
}

static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
	/* See update_cfs_rq_load_avg() */
	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
					  cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
}
#else
static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	if (se->on_rq) {
		/* commit outstanding execution time */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
		update_load_sub(&cfs_rq->load, se->load.weight);
	}
	dequeue_load_avg(cfs_rq, se);

	update_load_set(&se->load, weight);

#ifdef CONFIG_SMP
	do {
		u32 divider = get_pelt_divider(&se->avg);

		se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
	} while (0);
#endif

	enqueue_load_avg(cfs_rq, se);
	if (se->on_rq)
		update_load_add(&cfs_rq->load, se->load.weight);
}

void reweight_task(struct task_struct *p, int prio)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct load_weight *load = &se->load;
	unsigned long weight = scale_load(sched_prio_to_weight[prio]);

	reweight_entity(cfs_rq, se, weight);
	load->inv_weight = sched_prio_to_wmult[prio];
}
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);

#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
/*
 * All this does is approximate the hierarchical proportion which includes that
 * global sum we all love to hate.
 *
 * That is, the weight of a group entity, is the proportional share of the
 * group weight based on the group runqueue weights. That is:
 *
 *                     tg->weight * grq->load.weight
 *   ge->load.weight = -----------------------------               (1)
 *                        \Sum grq->load.weight
 *
 * Now, because computing that sum is prohibitively expensive to compute (been
 * there, done that) we approximate it with this average stuff. The average
 * moves slower and therefore the approximation is cheaper and more stable.
 *
 * So instead of the above, we substitute:
 *
 *   grq->load.weight -> grq->avg.load_avg                         (2)
 *
 * which yields the following:
 *
 *                     tg->weight * grq->avg.load_avg
 *   ge->load.weight = ------------------------------              (3)
 *                              tg->load_avg
 *
 * Where: tg->load_avg ~= \Sum grq->avg.load_avg
 *
 * That is shares_avg, and it is right (given the approximation (2)).
 *
 * The problem with it is that because the average is slow -- it was designed
 * to be exactly that of course -- this leads to transients in boundary
 * conditions. In specific, the case where the group was idle and we start the
 * one task. It takes time for our CPU's grq->avg.load_avg to build up,
 * yielding bad latency etc..
 *
 * Now, in that special case (1) reduces to:
 *
 *                     tg->weight * grq->load.weight
 *   ge->load.weight = ----------------------------- = tg->weight  (4)
 *                          grp->load.weight
 *
 * That is, the sum collapses because all other CPUs are idle; the UP scenario.
 *
 * So what we do is modify our approximation (3) to approach (4) in the (near)
 * UP case, like:
 *
 *   ge->load.weight =
 *
 *              tg->weight * grq->load.weight
 *     ---------------------------------------------------         (5)
 *     tg->load_avg - grq->avg.load_avg + grq->load.weight
 *
 * But because grq->load.weight can drop to 0, resulting in a divide by zero,
 * we need to use grq->avg.load_avg as its lower bound, which then gives:
 *
 *
 *                     tg->weight * grq->load.weight
 *   ge->load.weight = -----------------------------               (6)
 *                              tg_load_avg'
 *
 * Where:
 *
 *   tg_load_avg' = tg->load_avg - grq->avg.load_avg +
 *                  max(grq->load.weight, grq->avg.load_avg)
 *
 * And that is shares_weight and is icky. In the (near) UP case it approaches
 * (4) while in the normal case it approaches (3). It consistently
 * overestimates the ge->load.weight and therefore:
 *
 *   \Sum ge->load.weight >= tg->weight
 *
 * hence icky!
 */
static long calc_group_shares(struct cfs_rq *cfs_rq)
{
	long tg_weight, tg_shares, load, shares;
	struct task_group *tg = cfs_rq->tg;

	tg_shares = READ_ONCE(tg->shares);

	load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);

	tg_weight = atomic_long_read(&tg->load_avg);

	/* Ensure tg_weight >= load */
	tg_weight -= cfs_rq->tg_load_avg_contrib;
	tg_weight += load;

	shares = (tg_shares * load);
	if (tg_weight)
		shares /= tg_weight;

	/*
	 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
	 * of a group with small tg->shares value. It is a floor value which is
	 * assigned as a minimum load.weight to the sched_entity representing
	 * the group on a CPU.
	 *
	 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
	 * on an 8-core system with 8 tasks each runnable on one CPU shares has
	 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
	 * case no task is runnable on a CPU MIN_SHARES=2 should be returned
	 * instead of 0.
	 */
	return clamp_t(long, shares, MIN_SHARES, tg_shares);
}
#endif /* CONFIG_SMP */
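
/*
 * Worked example with made-up numbers: tg->shares = 1024 and two busy CPUs
 * whose grq->load.weight (and converged load_avg) are 1024 and 3072. For
 * the first CPU, load = 1024 and tg_load_avg' = 3072 + 1024 = 4096, so
 * shares = 1024 * 1024 / 4096 = 256; the second CPU gets 768. The two
 * group entities thus split tg->weight 1:3, matching (1) once the averages
 * have converged.
 */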
/*
 * Recomputes the group entity based on the current state of its group
 * runqueue.
 */
static void update_cfs_group(struct sched_entity *se)
{
	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
	long shares;

	if (!gcfs_rq)
		return;

	if (throttled_hierarchy(gcfs_rq))
		return;

#ifndef CONFIG_SMP
	shares = READ_ONCE(gcfs_rq->tg->shares);

	if (likely(se->load.weight == shares))
		return;
#else
	shares = calc_group_shares(gcfs_rq);
#endif

	reweight_entity(cfs_rq_of(se), se, shares);
}

#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_cfs_group(struct sched_entity *se)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
{
	struct rq *rq = rq_of(cfs_rq);

	if (&rq->cfs == cfs_rq) {
		/*
		 * There are a few boundary cases this might miss but it should
		 * get called often enough that that should (hopefully) not be
		 * a real problem.
		 *
		 * It will not get called when we go idle, because the idle
		 * thread is a different class (!fair), nor will the utilization
		 * number include things like RT tasks.
		 *
		 * As is, the util number is not freq-invariant (we'd have to
		 * implement arch_scale_freq_capacity() for that).
		 *
		 * See cpu_util_cfs().
		 */
		cpufreq_update_util(rq, flags);
	}
}
#ifdef CONFIG_SMP
static inline bool load_avg_is_decayed(struct sched_avg *sa)
{
	if (sa->load_sum)
		return false;

	if (sa->util_sum)
		return false;

	if (sa->runnable_sum)
		return false;

	/*
	 * _avg must be null when _sum are null because _avg = _sum / divider
	 * Make sure that rounding and/or propagation of PELT values never
	 * break this.
	 */
	SCHED_WARN_ON(sa->load_avg ||
		      sa->util_avg ||
		      sa->runnable_avg);

	return true;
}

static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
{
	return u64_u32_load_copy(cfs_rq->avg.last_update_time,
				 cfs_rq->last_update_time_copy);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
 * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
 * bottom-up, we only have to test whether the cfs_rq before us on the list
 * is our child.
 * If cfs_rq is not on the list, test whether a child needs to be added to
 * connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
 */
static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
{
	struct cfs_rq *prev_cfs_rq;
	struct list_head *prev;

	if (cfs_rq->on_list) {
		prev = cfs_rq->leaf_cfs_rq_list.prev;
	} else {
		struct rq *rq = rq_of(cfs_rq);

		prev = rq->tmp_alone_branch;
	}

	prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);

	return (prev_cfs_rq->tg->parent == cfs_rq->tg);
}

static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->load.weight)
		return false;

	if (!load_avg_is_decayed(&cfs_rq->avg))
		return false;

	if (child_cfs_rq_on_list(cfs_rq))
		return false;

	return true;
}
/**
 * update_tg_load_avg - update the tg's load avg
 * @cfs_rq: the cfs_rq whose avg changed
 *
 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
 * However, because tg->load_avg is a global value there are performance
 * considerations.
 *
 * In order to avoid having to look at the other cfs_rq's, we use a
 * differential update where we store the last value we propagated. This in
 * turn allows skipping updates if the differential is 'small'.
 *
 * Updating tg's load_avg is necessary before update_cfs_share().
 */
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;

	/*
	 * No need to update load_avg for root_task_group as it is not used.
	 */
	if (cfs_rq->tg == &root_task_group)
		return;

	if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
		atomic_long_add(delta, &cfs_rq->tg->load_avg);
		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
	}
}
/*
 * Called within set_task_rq() right before setting a task's CPU. The
 * caller only guarantees p->pi_lock is held; no other assumptions,
 * including the state of rq->lock, should be made.
 */
void set_task_rq_fair(struct sched_entity *se,
		      struct cfs_rq *prev, struct cfs_rq *next)
{
	u64 p_last_update_time;
	u64 n_last_update_time;

	if (!sched_feat(ATTACH_AGE_LOAD))
		return;

	/*
	 * We are supposed to update the task to "current" time, then it's up
	 * to date and ready to go to the new CPU/cfs_rq. But we have difficulty
	 * in getting what current time is, so simply throw away the
	 * out-of-date time. This will result in the wakee task being less
	 * decayed, but giving the wakee more load sounds not bad.
	 */
	if (!(se->avg.last_update_time && prev))
		return;

	p_last_update_time = cfs_rq_last_update_time(prev);
	n_last_update_time = cfs_rq_last_update_time(next);

	__update_load_avg_blocked_se(p_last_update_time, se);
	se->avg.last_update_time = n_last_update_time;
}
/*
 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
 * propagate its contribution. The key to this propagation is the invariant
 * that for each group:
 *
 *   ge->avg == grq->avg						(1)
 *
 * _IFF_ we look at the pure running and runnable sums. Because they
 * represent the very same entity, just at different points in the hierarchy.
 *
 * Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
 * and simply copies the running/runnable sum over (but still wrong, because
 * the group entity and group rq do not have their PELT windows aligned).
 *
 * However, update_tg_cfs_load() is more complex. So we have:
 *
 *   ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg		(2)
 *
 * And since, like util, the runnable part should be directly transferable,
 * the following would _appear_ to be the straight forward approach:
 *
 *   grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg	(3)
 *
 * And per (1) we have:
 *
 *   ge->avg.runnable_avg == grq->avg.runnable_avg
 *
 * Which gives:
 *
 *                      ge->load.weight * grq->avg.load_avg
 *   ge->avg.load_avg = -----------------------------------		(4)
 *                               grq->load.weight
 *
 * Except that is wrong!
 *
 * Because while for entities historical weight is not important and we
 * really only care about our future and therefore can consider a pure
 * runnable sum, runqueues can NOT do this.
 *
 * We specifically want runqueues to have a load_avg that includes
 * historical weights. Those represent the blocked load, the load we expect
 * to (shortly) return to us. This only works by keeping the weights as
 * integral part of the sum. We therefore cannot decompose as per (3).
 *
 * Another reason this doesn't work is that runnable isn't a 0-sum entity.
 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
 * rq itself is runnable anywhere between 2/3 and 1 depending on how the
 * runnable section of these tasks overlap (or not). If they were to perfectly
 * align the rq as a whole would be runnable 2/3 of the time. If however we
 * always have at least 1 runnable task, the rq as a whole is always runnable.
 *
 * So we'll have to approximate.. :/
 *
 * Given the constraint:
 *
 *   ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
 *
 * We can construct a rule that adds runnable to a rq by assuming minimal
 * overlap.
 *
 * On removal, we'll assume each task is equally runnable; which yields:
 *
 *   grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
 *
 * XXX: only do this for the part of runnable > running ?
 *
 */
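
/*
 * Numerical sketch of the removal rule above (made-up values): a group
 * runqueue holding two nice-0 tasks, i.e. scale_load_down(grq->load.weight)
 * == 2, with grq->avg.load_sum of 2000 is assumed to have an unweighted
 * runnable_sum of roughly 2000 / 2 = 1000 when load is removed from it.
 */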
static inline void
update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
	long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
	u32 new_sum, divider;

	/* Nothing to update */
	if (!delta_avg)
		return;

	/*
	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
	 * See ___update_load_avg() for details.
	 */
	divider = get_pelt_divider(&cfs_rq->avg);

	/* Set new sched_entity's utilization */
	se->avg.util_avg = gcfs_rq->avg.util_avg;
	new_sum = se->avg.util_avg * divider;
	delta_sum = (long)new_sum - (long)se->avg.util_sum;
	se->avg.util_sum = new_sum;

	/* Update parent cfs_rq utilization */
	add_positive(&cfs_rq->avg.util_avg, delta_avg);
	add_positive(&cfs_rq->avg.util_sum, delta_sum);

	/* See update_cfs_rq_load_avg() */
	cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
				     cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
}
static inline void
update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
	long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
	u32 new_sum, divider;

	/* Nothing to update */
	if (!delta_avg)
		return;

	/*
	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
	 * See ___update_load_avg() for details.
	 */
	divider = get_pelt_divider(&cfs_rq->avg);

	/* Set new sched_entity's runnable */
	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
	new_sum = se->avg.runnable_avg * divider;
	delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
	se->avg.runnable_sum = new_sum;

	/* Update parent cfs_rq runnable */
	add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
	add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
	/* See update_cfs_rq_load_avg() */
	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
					 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
}
static inline void
update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
	unsigned long load_avg;
	u64 load_sum = 0;
	s64 delta_sum;
	u32 divider;

	if (!runnable_sum)
		return;

	gcfs_rq->prop_runnable_sum = 0;

	/*
	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
	 * See ___update_load_avg() for details.
	 */
	divider = get_pelt_divider(&cfs_rq->avg);

	if (runnable_sum >= 0) {
		/*
		 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
		 * the CPU is saturated running == runnable.
		 */
		runnable_sum += se->avg.load_sum;
		runnable_sum = min_t(long, runnable_sum, divider);
	} else {
		/*
		 * Estimate the new unweighted runnable_sum of the gcfs_rq by
		 * assuming all tasks are equally runnable.
		 */
		if (scale_load_down(gcfs_rq->load.weight)) {
			load_sum = div_u64(gcfs_rq->avg.load_sum,
				scale_load_down(gcfs_rq->load.weight));
		}

		/* But make sure to not inflate se's runnable */
		runnable_sum = min(se->avg.load_sum, load_sum);
	}

	/*
	 * runnable_sum can't be lower than running_sum
	 * Rescale running sum to be in the same range as runnable sum
	 * running_sum is in [0 : LOAD_AVG_MAX <<  SCHED_CAPACITY_SHIFT]
	 * runnable_sum is in [0 : LOAD_AVG_MAX]
	 */
	running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
	runnable_sum = max(runnable_sum, running_sum);

	load_sum = se_weight(se) * runnable_sum;
	load_avg = div_u64(load_sum, divider);

	delta_avg = load_avg - se->avg.load_avg;
	if (!delta_avg)
		return;

	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;

	se->avg.load_sum = runnable_sum;
	se->avg.load_avg = load_avg;
	add_positive(&cfs_rq->avg.load_avg, delta_avg);
	add_positive(&cfs_rq->avg.load_sum, delta_sum);
	/* See update_cfs_rq_load_avg() */
	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
				     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
}
static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
{
	cfs_rq->propagate = 1;
	cfs_rq->prop_runnable_sum += runnable_sum;
}

/* Update task and its cfs_rq load average */
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq, *gcfs_rq;

	if (entity_is_task(se))
		return 0;

	gcfs_rq = group_cfs_rq(se);
	if (!gcfs_rq->propagate)
		return 0;

	gcfs_rq->propagate = 0;

	cfs_rq = cfs_rq_of(se);

	add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);

	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
	update_tg_cfs_load(cfs_rq, se, gcfs_rq);

	trace_pelt_cfs_tp(cfs_rq);
	trace_pelt_se_tp(se);

	return 1;
}
/*
 * Check if we need to update the load and the utilization of a blocked
 * group_entity:
 */
static inline bool skip_blocked_update(struct sched_entity *se)
{
	struct cfs_rq *gcfs_rq = group_cfs_rq(se);

	/*
	 * If sched_entity still have not zero load or utilization, we have to
	 * decay it:
	 */
	if (se->avg.load_avg || se->avg.util_avg)
		return false;

	/*
	 * If there is a pending propagation, we have to update the load and
	 * the utilization of the sched_entity:
	 */
	if (gcfs_rq->propagate)
		return false;

	/*
	 * Otherwise, the load and the utilization of the sched_entity is
	 * already zero and there is no pending propagation, so it will be a
	 * waste of time to try to decay it:
	 */
	return true;
}

#else /* CONFIG_FAIR_GROUP_SCHED */

static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}

static inline int propagate_entity_load_avg(struct sched_entity *se)
{
	return 0;
}

static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}

#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_NO_HZ_COMMON
static inline void migrate_se_pelt_lag(struct sched_entity *se)
{
	u64 throttled = 0, now, lut;
	struct cfs_rq *cfs_rq;
	struct rq *rq;
	bool is_idle;

	if (load_avg_is_decayed(&se->avg))
		return;

	cfs_rq = cfs_rq_of(se);
	rq = rq_of(cfs_rq);

	rcu_read_lock();
	is_idle = is_idle_task(rcu_dereference(rq->curr));
	rcu_read_unlock();

	/*
	 * The lag estimation comes with a cost we don't want to pay all the
	 * time. Hence, limiting to the case where the source CPU is idle and
	 * we know we are at the greatest risk to have an outdated clock.
	 */
	if (!is_idle)
		return;

	/*
	 * Estimated "now" is: last_update_time + cfs_idle_lag + rq_idle_lag, where:
	 *
	 *   last_update_time (the cfs_rq's last_update_time)
	 *	= cfs_rq_clock_pelt()@cfs_rq_idle
	 *	= rq_clock_pelt()@cfs_rq_idle
	 *	  - cfs->throttled_clock_pelt_time@cfs_rq_idle
	 *
	 *   cfs_idle_lag (delta between rq's update and cfs_rq's update)
	 *	= rq_clock_pelt()@rq_idle - rq_clock_pelt()@cfs_rq_idle
	 *
	 *   rq_idle_lag (delta between now and rq's update)
	 *	= sched_clock_cpu() - rq_clock()@rq_idle
	 *
	 * We can then write:
	 *
	 *    now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time +
	 *          sched_clock_cpu() - rq_clock()@rq_idle
	 * Where:
	 *      rq_clock_pelt()@rq_idle is rq->clock_pelt_idle
	 *      rq_clock()@rq_idle      is rq->clock_idle
	 *      cfs->throttled_clock_pelt_time@cfs_rq_idle
	 *                              is cfs_rq->throttled_pelt_idle
	 */

#ifdef CONFIG_CFS_BANDWIDTH
	throttled = u64_u32_load(cfs_rq->throttled_pelt_idle);
	/* The clock has been stopped for throttling */
	if (throttled == U64_MAX)
		return;
#endif
	now = u64_u32_load(rq->clock_pelt_idle);
	/*
	 * Paired with _update_idle_rq_clock_pelt(). It ensures at the worst case
	 * is observed the old clock_pelt_idle value and the new clock_idle,
	 * which lead to an underestimation. The opposite would lead to an
	 * overestimation.
	 */
	smp_rmb();
	lut = cfs_rq_last_update_time(cfs_rq);

	now -= throttled;
	if (now < lut)
		/*
		 * cfs_rq->avg.last_update_time is more recent than our
		 * estimation, let's use it.
		 */
		now = lut;
	else
		now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);

	__update_load_avg_blocked_se(now, se);
}
#else
static void migrate_se_pelt_lag(struct sched_entity *se) {}
#endif
/**
 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
 * @now: current time, as per cfs_rq_clock_pelt()
 * @cfs_rq: cfs_rq to update
 *
 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
 * avg. The immediate corollary is that all (fair) tasks must be attached.
 *
 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
 *
 * Return: true if the load decayed or we removed load.
 *
 * Since both these conditions indicate a changed cfs_rq->avg.load we should
 * call update_tg_load_avg() when this function returns true.
 */
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
	struct sched_avg *sa = &cfs_rq->avg;
	int decayed = 0;

	if (cfs_rq->removed.nr) {
		unsigned long r;
		u32 divider = get_pelt_divider(&cfs_rq->avg);

		raw_spin_lock(&cfs_rq->removed.lock);
		swap(cfs_rq->removed.util_avg, removed_util);
		swap(cfs_rq->removed.load_avg, removed_load);
		swap(cfs_rq->removed.runnable_avg, removed_runnable);
		cfs_rq->removed.nr = 0;
		raw_spin_unlock(&cfs_rq->removed.lock);

		r = removed_load;
		sub_positive(&sa->load_avg, r);
		sub_positive(&sa->load_sum, r * divider);
		/* See sa->util_sum below */
		sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);

		r = removed_util;
		sub_positive(&sa->util_avg, r);
		sub_positive(&sa->util_sum, r * divider);
		/*
		 * Because of rounding, se->util_sum might end up being +1 more than
		 * cfs->util_sum. Although this is not a problem by itself, detaching
		 * a lot of tasks with the rounding problem between 2 updates of
		 * util_avg (~1ms) can make cfs->util_sum become null whereas
		 * cfs_util_avg is not.
		 * Check that util_sum is still above its lower bound for the new
		 * util_avg. Given that period_contrib might have moved since the last
		 * sync, we are only sure that util_sum must be above or equal to
		 *   util_avg * minimum possible divider
		 */
		sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);

		r = removed_runnable;
		sub_positive(&sa->runnable_avg, r);
		sub_positive(&sa->runnable_sum, r * divider);
		/* See sa->util_sum above */
		sa->runnable_sum = max_t(u32, sa->runnable_sum,
					      sa->runnable_avg * PELT_MIN_DIVIDER);

		/*
		 * removed_runnable is the unweighted version of removed_load so we
		 * can use it to estimate removed_load_sum.
		 */
		add_tg_cfs_propagate(cfs_rq,
			-(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);

		decayed = 1;
	}

	decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
	u64_u32_store_copy(sa->last_update_time,
			   cfs_rq->last_update_time_copy,
			   sa->last_update_time);

	return decayed;
}
/**
 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
 * @cfs_rq: cfs_rq to attach to
 * @se: sched_entity to attach
 *
 * Must call update_cfs_rq_load_avg() before this, since we rely on
 * cfs_rq->avg.last_update_time being current.
 */
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
	 * See ___update_load_avg() for details.
	 */
	u32 divider = get_pelt_divider(&cfs_rq->avg);

	/*
	 * When we attach the @se to the @cfs_rq, we must align the decay
	 * window because without that, really weird and wonderful things can
	 * happen.
	 */
	se->avg.last_update_time = cfs_rq->avg.last_update_time;
	se->avg.period_contrib = cfs_rq->avg.period_contrib;

	/*
	 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
	 * period_contrib. This isn't strictly correct, but since we're
	 * entirely outside of the PELT hierarchy, nobody cares if we truncate
	 * _sum a little.
	 */
	se->avg.util_sum = se->avg.util_avg * divider;

	se->avg.runnable_sum = se->avg.runnable_avg * divider;

	se->avg.load_sum = se->avg.load_avg * divider;
	if (se_weight(se) < se->avg.load_sum)
		se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
	else
		se->avg.load_sum = 1;

	enqueue_load_avg(cfs_rq, se);
	cfs_rq->avg.util_avg += se->avg.util_avg;
	cfs_rq->avg.util_sum += se->avg.util_sum;
	cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
	cfs_rq->avg.runnable_sum += se->avg.runnable_sum;

	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);

	cfs_rq_util_change(cfs_rq, 0);

	trace_pelt_cfs_tp(cfs_rq);
}
/**
 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
 * @cfs_rq: cfs_rq to detach from
 * @se: sched_entity to detach
 *
 * Must call update_cfs_rq_load_avg() before this, since we rely on
 * cfs_rq->avg.last_update_time being current.
 */
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	dequeue_load_avg(cfs_rq, se);
	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
	/* See update_cfs_rq_load_avg() */
	cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
				     cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);

	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
	/* See update_cfs_rq_load_avg() */
	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
					 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);

	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);

	cfs_rq_util_change(cfs_rq, 0);

	trace_pelt_cfs_tp(cfs_rq);
}
/*
 * Optional action to be done while updating the load average
 */
#define UPDATE_TG	0x1
#define SKIP_AGE_LOAD	0x2
#define DO_ATTACH	0x4
#define DO_DETACH	0x8

/* Update task and its cfs_rq load average */
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	u64 now = cfs_rq_clock_pelt(cfs_rq);
	int decayed;

	/*
	 * Track task load average for carrying it to new CPU after migrated, and
	 * track group sched_entity load average for task_h_load calc in migration
	 */
	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
		__update_load_avg_se(now, cfs_rq, se);

	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
	decayed |= propagate_entity_load_avg(se);

	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {

		/*
		 * DO_ATTACH means we're here from enqueue_entity().
		 * !last_update_time means we've passed through
		 * migrate_task_rq_fair() indicating we migrated.
		 *
		 * IOW we're enqueueing a task on a new CPU.
		 */
		attach_entity_load_avg(cfs_rq, se);
		update_tg_load_avg(cfs_rq);

	} else if (flags & DO_DETACH) {
		/*
		 * DO_DETACH means we're here from dequeue_entity()
		 * and we are migrating task out of the CPU.
		 */
		detach_entity_load_avg(cfs_rq, se);
		update_tg_load_avg(cfs_rq);
	} else if (decayed) {
		cfs_rq_util_change(cfs_rq, 0);

		if (flags & UPDATE_TG)
			update_tg_load_avg(cfs_rq);
	}
}
/*
 * Synchronize entity load avg of dequeued entity without locking
 * the previous rq.
 */
static void sync_entity_load_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	u64 last_update_time;

	last_update_time = cfs_rq_last_update_time(cfs_rq);
	__update_load_avg_blocked_se(last_update_time, se);
}
/*
 * Task first catches up with cfs_rq, and then subtract
 * itself from the cfs_rq (task must be off the queue now).
 */
static void remove_entity_load_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	unsigned long flags;

	/*
	 * tasks cannot exit without having gone through wake_up_new_task() ->
	 * enqueue_task_fair() which will have added things to the cfs_rq,
	 * so we can remove unconditionally.
	 */

	sync_entity_load_avg(se);

	raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
	++cfs_rq->removed.nr;
	cfs_rq->removed.util_avg	+= se->avg.util_avg;
	cfs_rq->removed.load_avg	+= se->avg.load_avg;
	cfs_rq->removed.runnable_avg	+= se->avg.runnable_avg;
	raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
}
static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
{
	return cfs_rq->avg.runnable_avg;
}

static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
{
	return cfs_rq->avg.load_avg;
}

static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);

static inline unsigned long task_util(struct task_struct *p)
{
	return READ_ONCE(p->se.avg.util_avg);
}

static inline unsigned long _task_util_est(struct task_struct *p)
{
	struct util_est ue = READ_ONCE(p->se.avg.util_est);

	return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
}

static inline unsigned long task_util_est(struct task_struct *p)
{
	return max(task_util(p), _task_util_est(p));
}
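/*
 * Illustrative sketch (not kernel code): the estimate returned by
 * task_util_est() above is simply the max of three quantities - the current
 * PELT util_avg, the EWMA of past activations, and the util_avg snapshotted at
 * the last enqueue (with the UTIL_AVG_UNCHANGED flag bit masked out). The
 * helper name and its by-value arguments are hypothetical.
 */
static inline unsigned long task_util_est_sketch(unsigned long util_avg,
						 unsigned long ewma,
						 unsigned long enqueued)
{
	unsigned long est = max(ewma, enqueued & ~UTIL_AVG_UNCHANGED);

	return max(util_avg, est);
}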
4351 #ifdef CONFIG_UCLAMP_TASK
4352 static inline unsigned long uclamp_task_util(struct task_struct
*p
,
4353 unsigned long uclamp_min
,
4354 unsigned long uclamp_max
)
4356 return clamp(task_util_est(p
), uclamp_min
, uclamp_max
);
4359 static inline unsigned long uclamp_task_util(struct task_struct
*p
,
4360 unsigned long uclamp_min
,
4361 unsigned long uclamp_max
)
4363 return task_util_est(p
);
4367 static inline void util_est_enqueue(struct cfs_rq
*cfs_rq
,
4368 struct task_struct
*p
)
4370 unsigned int enqueued
;
4372 if (!sched_feat(UTIL_EST
))
4375 /* Update root cfs_rq's estimated utilization */
4376 enqueued
= cfs_rq
->avg
.util_est
.enqueued
;
4377 enqueued
+= _task_util_est(p
);
4378 WRITE_ONCE(cfs_rq
->avg
.util_est
.enqueued
, enqueued
);
4380 trace_sched_util_est_cfs_tp(cfs_rq
);
4383 static inline void util_est_dequeue(struct cfs_rq
*cfs_rq
,
4384 struct task_struct
*p
)
4386 unsigned int enqueued
;
4388 if (!sched_feat(UTIL_EST
))
4391 /* Update root cfs_rq's estimated utilization */
4392 enqueued
= cfs_rq
->avg
.util_est
.enqueued
;
4393 enqueued
-= min_t(unsigned int, enqueued
, _task_util_est(p
));
4394 WRITE_ONCE(cfs_rq
->avg
.util_est
.enqueued
, enqueued
);
4396 trace_sched_util_est_cfs_tp(cfs_rq
);
#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)

/*
 * Check if a (signed) value is within a specified (unsigned) margin,
 * based on the observation that:
 *
 *      abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
 *
 * NOTE: this only works when value + margin < INT_MAX.
 */
static inline bool within_margin(int value, int margin)
{
	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}
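/*
 * Worked example (illustrative only) for the identity used by within_margin()
 * above: with value = -5 and margin = 10, (unsigned)(-5 + 10 - 1) = 4, which is
 * < (2 * 10 - 1) = 19, so |-5| < 10 holds; with value = 12, (12 + 10 - 1) = 21,
 * which is not < 19, so |12| < 10 correctly fails. The helper below merely
 * restates the check and is not part of the scheduler.
 */
static inline bool abs_lt_sketch(int value, int margin)
{
	return (unsigned int)(value + margin - 1) < (unsigned int)(2 * margin - 1);
}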
4414 static inline void util_est_update(struct cfs_rq
*cfs_rq
,
4415 struct task_struct
*p
,
4418 long last_ewma_diff
, last_enqueued_diff
;
4421 if (!sched_feat(UTIL_EST
))
4425 * Skip update of task's estimated utilization when the task has not
4426 * yet completed an activation, e.g. being migrated.
4432 * If the PELT values haven't changed since enqueue time,
4433 * skip the util_est update.
4435 ue
= p
->se
.avg
.util_est
;
4436 if (ue
.enqueued
& UTIL_AVG_UNCHANGED
)
4439 last_enqueued_diff
= ue
.enqueued
;
4442 * Reset EWMA on utilization increases, the moving average is used only
4443 * to smooth utilization decreases.
4445 ue
.enqueued
= task_util(p
);
4446 if (sched_feat(UTIL_EST_FASTUP
)) {
4447 if (ue
.ewma
< ue
.enqueued
) {
4448 ue
.ewma
= ue
.enqueued
;
4454 * Skip update of task's estimated utilization when its members are
4455 * already ~1% close to its last activation value.
4457 last_ewma_diff
= ue
.enqueued
- ue
.ewma
;
4458 last_enqueued_diff
-= ue
.enqueued
;
	if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
		if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
			goto done;

		return;
	}

	/*
	 * To avoid overestimation of actual task utilization, skip updates if
	 * we cannot grant there is idle time in this CPU.
	 */
	if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
		return;

	/*
	 * Update Task's estimated utilization
	 *
	 * When *p completes an activation we can consolidate another sample
	 * of the task size. This is done by storing the current PELT value
	 * as ue.enqueued and by using this value to update the Exponential
	 * Weighted Moving Average (EWMA):
	 *
	 *  ewma(t) =  w *  task_util(p) + (1-w) * ewma(t-1)
	 *          =  w *  task_util(p) +         ewma(t-1)  - w * ewma(t-1)
	 *          =  w * (task_util(p) -         ewma(t-1)) +     ewma(t-1)
	 *          =  w * (      last_ewma_diff            ) +     ewma(t-1)
	 *          =  w * (last_ewma_diff  +  ewma(t-1) / w)
	 *
	 * Where 'w' is the weight of new samples, which is configured to be
	 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
	 */
	ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
	ue.ewma  += last_ewma_diff;
	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
done:
	ue.enqueued |= UTIL_AVG_UNCHANGED;
	WRITE_ONCE(p->se.avg.util_est, ue);

	trace_sched_util_est_se_tp(&p->se);
}
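/*
 * Illustrative sketch (not kernel code) of the fixed-point EWMA update above,
 * assuming UTIL_EST_WEIGHT_SHIFT == 2 (w = 1/4). For ewma = 200 and a new
 * sample of 120, last_ewma_diff = -80 and the update yields
 * (200 * 4 - 80) / 4 = 180, i.e. the estimate decays by a quarter of the gap.
 * Name and signature are hypothetical.
 */
static inline unsigned long util_est_ewma_sketch(unsigned long ewma,
						 unsigned long sample)
{
	long last_ewma_diff = (long)sample - (long)ewma;

	ewma <<= UTIL_EST_WEIGHT_SHIFT;
	ewma  += last_ewma_diff;
	ewma >>= UTIL_EST_WEIGHT_SHIFT;

	return ewma;
}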
4500 static inline int util_fits_cpu(unsigned long util
,
4501 unsigned long uclamp_min
,
4502 unsigned long uclamp_max
,
4505 unsigned long capacity_orig
, capacity_orig_thermal
;
4506 unsigned long capacity
= capacity_of(cpu
);
4507 bool fits
, uclamp_max_fits
;
4510 * Check if the real util fits without any uclamp boost/cap applied.
4512 fits
= fits_capacity(util
, capacity
);
4514 if (!uclamp_is_used())
4518 * We must use capacity_orig_of() for comparing against uclamp_min and
4519 * uclamp_max. We only care about capacity pressure (by using
4520 * capacity_of()) for comparing against the real util.
4522 * If a task is boosted to 1024 for example, we don't want a tiny
4523 * pressure to skew the check whether it fits a CPU or not.
4525 * Similarly if a task is capped to capacity_orig_of(little_cpu), it
4526 * should fit a little cpu even if there's some pressure.
4528 * Only exception is for thermal pressure since it has a direct impact
4529 * on available OPP of the system.
4531 * We honour it for uclamp_min only as a drop in performance level
4532 * could result in not getting the requested minimum performance level.
4534 * For uclamp_max, we can tolerate a drop in performance level as the
4535 * goal is to cap the task. So it's okay if it's getting less.
4537 capacity_orig
= capacity_orig_of(cpu
);
4538 capacity_orig_thermal
= capacity_orig
- arch_scale_thermal_pressure(cpu
);
4541 * We want to force a task to fit a cpu as implied by uclamp_max.
4542 * But we do have some corner cases to cater for..
4548 * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
4551 * | | | | | | | (util somewhere in this region)
4554 * +----------------------------------------
4557 * In the above example if a task is capped to a specific performance
4558 * point, y, then when:
4560 * * util = 80% of x then it does not fit on cpu0 and should migrate
4562 * * util = 80% of y then it is forced to fit on cpu1 to honour
4563 * uclamp_max request.
4565 * which is what we're enforcing here. A task always fits if
4566 * uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig,
4567 * the normal upmigration rules should withhold still.
4569 * Only exception is when we are on max capacity, then we need to be
4570 * careful not to block overutilized state. This is so because:
4572 * 1. There's no concept of capping at max_capacity! We can't go
4573 * beyond this performance level anyway.
4574 * 2. The system is being saturated when we're operating near
4575 * max capacity, it doesn't make sense to block overutilized.
4577 uclamp_max_fits
= (capacity_orig
== SCHED_CAPACITY_SCALE
) && (uclamp_max
== SCHED_CAPACITY_SCALE
);
4578 uclamp_max_fits
= !uclamp_max_fits
&& (uclamp_max
<= capacity_orig
);
4579 fits
= fits
|| uclamp_max_fits
;
4584 * | ___ (region a, capped, util >= uclamp_max)
4586 * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
4588 * | ___ | | | | (region b, uclamp_min <= util <= uclamp_max)
4589 * |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min
4591 * | | | | | | | (region c, boosted, util < uclamp_min)
4592 * +----------------------------------------
4595 * a) If util > uclamp_max, then we're capped, we don't care about
4596 * actual fitness value here. We only care if uclamp_max fits
4597 * capacity without taking margin/pressure into account.
4598 * See comment above.
4600 * b) If uclamp_min <= util <= uclamp_max, then the normal
4601 * fits_capacity() rules apply. Except we need to ensure that we
4602 * enforce we remain within uclamp_max, see comment above.
4604 * c) If util < uclamp_min, then we are boosted. Same as (b) but we
4605 * need to take into account the boosted value fits the CPU without
4606 * taking margin/pressure into account.
4608 * Cases (a) and (b) are handled in the 'fits' variable already. We
4609 * just need to consider an extra check for case (c) after ensuring we
4610 * handle the case uclamp_min > uclamp_max.
4612 uclamp_min
= min(uclamp_min
, uclamp_max
);
4613 if (fits
&& (util
< uclamp_min
) && (uclamp_min
> capacity_orig_thermal
))
static inline int task_fits_cpu(struct task_struct *p, int cpu)
{
	unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
	unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
	unsigned long util = task_util_est(p);

	/*
	 * Return true only if the cpu fully fits the task requirements, which
	 * include the utilization but also the performance hints.
	 */
	return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
}
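/*
 * Illustrative sketch (not kernel code): fits_capacity(), used via
 * util_fits_cpu() above, leaves roughly 20% headroom - a utilization "fits" a
 * CPU only while it stays below ~80% of the capacity. The constants below
 * mirror the 1280/1024 ratio the real macro uses; the helper name is made up.
 */
static inline bool fits_with_headroom_sketch(unsigned long util,
					     unsigned long capacity)
{
	/* util fits iff util * 1.25 < capacity, i.e. util < 80% of capacity */
	return util * 1280 < capacity * 1024;
}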
static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
{
	if (!sched_asym_cpucap_active())
		return;

	if (!p || p->nr_cpus_allowed == 1) {
		rq->misfit_task_load = 0;
		return;
	}

	if (task_fits_cpu(p, cpu_of(rq))) {
		rq->misfit_task_load = 0;
		return;
	}

	/*
	 * Make sure that misfit_task_load will not be null even if
	 * task_h_load() returns 0.
	 */
	rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
}
4653 #else /* CONFIG_SMP */
4655 static inline bool cfs_rq_is_decayed(struct cfs_rq
*cfs_rq
)
4660 #define UPDATE_TG 0x0
4661 #define SKIP_AGE_LOAD 0x0
4662 #define DO_ATTACH 0x0
4663 #define DO_DETACH 0x0
4665 static inline void update_load_avg(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int not_used1
)
4667 cfs_rq_util_change(cfs_rq
, 0);
4670 static inline void remove_entity_load_avg(struct sched_entity
*se
) {}
4673 attach_entity_load_avg(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
) {}
4675 detach_entity_load_avg(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
) {}
4677 static inline int newidle_balance(struct rq
*rq
, struct rq_flags
*rf
)
4683 util_est_enqueue(struct cfs_rq
*cfs_rq
, struct task_struct
*p
) {}
4686 util_est_dequeue(struct cfs_rq
*cfs_rq
, struct task_struct
*p
) {}
4689 util_est_update(struct cfs_rq
*cfs_rq
, struct task_struct
*p
,
4691 static inline void update_misfit_status(struct task_struct
*p
, struct rq
*rq
) {}
4693 #endif /* CONFIG_SMP */
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq->nr_spread_over);
#endif
}
static inline bool entity_is_long_sleeper(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq;
	u64 sleep_time;

	if (se->exec_start == 0)
		return false;

	cfs_rq = cfs_rq_of(se);

	sleep_time = rq_clock_task(rq_of(cfs_rq));

	/* Happen while migrating because of clock task divergence */
	if (sleep_time <= se->exec_start)
		return false;

	sleep_time -= se->exec_start;
	if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
		return true;

	return false;
}
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial) {
		unsigned long thresh;

		if (se_is_idle(se))
			thresh = sysctl_sched_min_granularity;
		else
			thresh = sysctl_sched_latency;

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/*
	 * Pull vruntime of the entity being placed to the base level of
	 * cfs_rq, to prevent boosting it if placed backwards.
	 * However, min_vruntime can advance much faster than real time, with
	 * the extreme being when an entity with the minimal weight always runs
	 * on the cfs_rq. If the waking entity slept for a long time, its
	 * vruntime difference from min_vruntime may overflow s64 and their
	 * comparison may get inversed, so ignore the entity's original
	 * vruntime in that case.
	 * The maximal vruntime speedup is given by the ratio of normal to
	 * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
	 * When placing a migrated waking entity, its exec_start has been set
	 * from a different rq. In order to take into account a possible
	 * divergence between new and prev rq's clocks task because of irq and
	 * stolen time, we take an additional margin.
	 * So, cutting off on the sleep time of
	 *     2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
	 * should be safe.
	 */
	if (entity_is_long_sleeper(se))
		se->vruntime = vruntime;
	else
		se->vruntime = max_vruntime(se->vruntime, vruntime);
}
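/*
 * Illustrative arithmetic (not kernel code) for the cutoff mentioned above:
 * with scale_load_down(NICE_0_LOAD) == 1024, 2^63 / 1024 nanoseconds is about
 * 9.0e15 ns ~= 9.0e6 s ~= 104 days, which is why sleepers longer than that are
 * pinned to min_vruntime instead of being renormalised. The helper below is a
 * hypothetical restatement of that bound with the 1024 assumption baked in.
 */
static inline u64 long_sleep_cutoff_ns_sketch(void)
{
	return (1ULL << 63) / 1024;	/* ~104 days, in nanoseconds */
}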
4789 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
);
4791 static inline bool cfs_bandwidth_used(void);
4798 * update_min_vruntime()
4799 * vruntime -= min_vruntime
4803 * update_min_vruntime()
4804 * vruntime += min_vruntime
4806 * this way the vruntime transition between RQs is done when both
4807 * min_vruntime are up-to-date.
4811 * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
4812 * vruntime -= min_vruntime
4816 * update_min_vruntime()
4817 * vruntime += min_vruntime
4819 * this way we don't have the most up-to-date min_vruntime on the originating
4820 * CPU and an up-to-date min_vruntime on the destination CPU.
4824 enqueue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
4826 bool renorm
= !(flags
& ENQUEUE_WAKEUP
) || (flags
& ENQUEUE_MIGRATED
);
4827 bool curr
= cfs_rq
->curr
== se
;
4830 * If we're the current task, we must renormalise before calling
4834 se
->vruntime
+= cfs_rq
->min_vruntime
;
4836 update_curr(cfs_rq
);
4839 * Otherwise, renormalise after, such that we're placed at the current
4840 * moment in time, instead of some random moment in the past. Being
4841 * placed in the past could significantly boost this task to the
4842 * fairness detriment of existing tasks.
4844 if (renorm
&& !curr
)
4845 se
->vruntime
+= cfs_rq
->min_vruntime
;
4848 * When enqueuing a sched_entity, we must:
4849 * - Update loads to have both entity and cfs_rq synced with now.
4850 * - For group_entity, update its runnable_weight to reflect the new
4851 * h_nr_running of its group cfs_rq.
4852 * - For group_entity, update its weight to reflect the new share of
4854 * - Add its new weight to cfs_rq->load.weight
4856 update_load_avg(cfs_rq
, se
, UPDATE_TG
| DO_ATTACH
);
4857 se_update_runnable(se
);
4858 update_cfs_group(se
);
4859 account_entity_enqueue(cfs_rq
, se
);
4861 if (flags
& ENQUEUE_WAKEUP
)
4862 place_entity(cfs_rq
, se
, 0);
4863 /* Entity has migrated, no longer consider this task hot */
4864 if (flags
& ENQUEUE_MIGRATED
)
4867 check_schedstat_required();
4868 update_stats_enqueue_fair(cfs_rq
, se
, flags
);
4869 check_spread(cfs_rq
, se
);
4871 __enqueue_entity(cfs_rq
, se
);
4874 if (cfs_rq
->nr_running
== 1) {
4875 check_enqueue_throttle(cfs_rq
);
4876 if (!throttled_hierarchy(cfs_rq
))
4877 list_add_leaf_cfs_rq(cfs_rq
);
4881 static void __clear_buddies_last(struct sched_entity
*se
)
4883 for_each_sched_entity(se
) {
4884 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
4885 if (cfs_rq
->last
!= se
)
4888 cfs_rq
->last
= NULL
;
4892 static void __clear_buddies_next(struct sched_entity
*se
)
4894 for_each_sched_entity(se
) {
4895 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
4896 if (cfs_rq
->next
!= se
)
4899 cfs_rq
->next
= NULL
;
4903 static void __clear_buddies_skip(struct sched_entity
*se
)
4905 for_each_sched_entity(se
) {
4906 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
4907 if (cfs_rq
->skip
!= se
)
4910 cfs_rq
->skip
= NULL
;
4914 static void clear_buddies(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
4916 if (cfs_rq
->last
== se
)
4917 __clear_buddies_last(se
);
4919 if (cfs_rq
->next
== se
)
4920 __clear_buddies_next(se
);
4922 if (cfs_rq
->skip
== se
)
4923 __clear_buddies_skip(se
);
4926 static __always_inline
void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
);
4929 dequeue_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
, int flags
)
4931 int action
= UPDATE_TG
;
4933 if (entity_is_task(se
) && task_on_rq_migrating(task_of(se
)))
4934 action
|= DO_DETACH
;
4937 * Update run-time statistics of the 'current'.
4939 update_curr(cfs_rq
);
4942 * When dequeuing a sched_entity, we must:
4943 * - Update loads to have both entity and cfs_rq synced with now.
4944 * - For group_entity, update its runnable_weight to reflect the new
4945 * h_nr_running of its group cfs_rq.
4946 * - Subtract its previous weight from cfs_rq->load.weight.
4947 * - For group entity, update its weight to reflect the new share
4948 * of its group cfs_rq.
4950 update_load_avg(cfs_rq
, se
, action
);
4951 se_update_runnable(se
);
4953 update_stats_dequeue_fair(cfs_rq
, se
, flags
);
4955 clear_buddies(cfs_rq
, se
);
4957 if (se
!= cfs_rq
->curr
)
4958 __dequeue_entity(cfs_rq
, se
);
4960 account_entity_dequeue(cfs_rq
, se
);
4963 * Normalize after update_curr(); which will also have moved
4964 * min_vruntime if @se is the one holding it back. But before doing
4965 * update_min_vruntime() again, which will discount @se's position and
4966 * can move min_vruntime forward still more.
4968 if (!(flags
& DEQUEUE_SLEEP
))
4969 se
->vruntime
-= cfs_rq
->min_vruntime
;
4971 /* return excess runtime on last dequeue */
4972 return_cfs_rq_runtime(cfs_rq
);
4974 update_cfs_group(se
);
4977 * Now advance min_vruntime if @se was the entity holding it back,
4978 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
4979 * put back on, and if we advance min_vruntime, we'll be placed back
4980 * further than we started -- ie. we'll be penalized.
4982 if ((flags
& (DEQUEUE_SAVE
| DEQUEUE_MOVE
)) != DEQUEUE_SAVE
)
4983 update_min_vruntime(cfs_rq
);
4985 if (cfs_rq
->nr_running
== 0)
4986 update_idle_cfs_rq_clock_pelt(cfs_rq
);
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;
	struct sched_entity *se;
	s64 delta;

	/*
	 * When many tasks blow up the sched_period; it is possible that
	 * sched_slice() reports unusually large results (when many tasks are
	 * very light for example). Therefore impose a maximum.
	 */
	ideal_runtime = min_t(u64, sched_slice(cfs_rq, curr), sysctl_sched_latency);

	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_curr(rq_of(cfs_rq));
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (delta_exec < sysctl_sched_min_granularity)
		return;

	se = __pick_first_entity(cfs_rq);
	delta = curr->vruntime - se->vruntime;

	if (delta < 0)
		return;

	if (delta > ideal_runtime)
		resched_curr(rq_of(cfs_rq));
}
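/*
 * Illustrative sketch (not kernel code) of the tick-preemption decision made
 * by check_preempt_tick() above: resched when current ran past its (capped)
 * ideal slice, never resched below the minimum granularity, and otherwise
 * resched when the leftmost entity trails current by more than a slice worth
 * of vruntime. The name and plain-integer interface are hypothetical.
 */
static inline bool should_resched_on_tick_sketch(u64 delta_exec, u64 slice,
						 u64 latency, u64 min_gran,
						 s64 vruntime_delta)
{
	u64 ideal_runtime = min_t(u64, slice, latency);

	if (delta_exec > ideal_runtime)
		return true;
	if (delta_exec < min_gran)
		return false;
	return vruntime_delta > (s64)ideal_runtime;
}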
5036 set_next_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*se
)
5038 clear_buddies(cfs_rq
, se
);
5040 /* 'current' is not kept within the tree. */
5043 * Any task has to be enqueued before it get to execute on
5044 * a CPU. So account for the time it spent waiting on the
5047 update_stats_wait_end_fair(cfs_rq
, se
);
5048 __dequeue_entity(cfs_rq
, se
);
5049 update_load_avg(cfs_rq
, se
, UPDATE_TG
);
5052 update_stats_curr_start(cfs_rq
, se
);
5056 * Track our maximum slice length, if the CPU's load is at
5057 * least twice that of our own weight (i.e. dont track it
5058 * when there are only lesser-weight tasks around):
5060 if (schedstat_enabled() &&
5061 rq_of(cfs_rq
)->cfs
.load
.weight
>= 2*se
->load
.weight
) {
5062 struct sched_statistics
*stats
;
5064 stats
= __schedstats_from_se(se
);
5065 __schedstat_set(stats
->slice_max
,
5066 max((u64
)stats
->slice_max
,
5067 se
->sum_exec_runtime
- se
->prev_sum_exec_runtime
));
5070 se
->prev_sum_exec_runtime
= se
->sum_exec_runtime
;
5074 wakeup_preempt_entity(struct sched_entity
*curr
, struct sched_entity
*se
);
5077 * Pick the next process, keeping these things in mind, in this order:
5078 * 1) keep things fair between processes/task groups
5079 * 2) pick the "next" process, since someone really wants that to run
5080 * 3) pick the "last" process, for cache locality
5081 * 4) do not run the "skip" process, if something else is available
5083 static struct sched_entity
*
5084 pick_next_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*curr
)
5086 struct sched_entity
*left
= __pick_first_entity(cfs_rq
);
5087 struct sched_entity
*se
;
5090 * If curr is set we have to see if its left of the leftmost entity
5091 * still in the tree, provided there was anything in the tree at all.
5093 if (!left
|| (curr
&& entity_before(curr
, left
)))
5096 se
= left
; /* ideally we run the leftmost entity */
5099 * Avoid running the skip buddy, if running something else can
5100 * be done without getting too unfair.
5102 if (cfs_rq
->skip
&& cfs_rq
->skip
== se
) {
5103 struct sched_entity
*second
;
5106 second
= __pick_first_entity(cfs_rq
);
5108 second
= __pick_next_entity(se
);
5109 if (!second
|| (curr
&& entity_before(curr
, second
)))
5113 if (second
&& wakeup_preempt_entity(second
, left
) < 1)
5117 if (cfs_rq
->next
&& wakeup_preempt_entity(cfs_rq
->next
, left
) < 1) {
5119 * Someone really wants this to run. If it's not unfair, run it.
5122 } else if (cfs_rq
->last
&& wakeup_preempt_entity(cfs_rq
->last
, left
) < 1) {
5124 * Prefer last buddy, try to return the CPU to a preempted task.
5132 static bool check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
);
5134 static void put_prev_entity(struct cfs_rq
*cfs_rq
, struct sched_entity
*prev
)
5137 * If still on the runqueue then deactivate_task()
5138 * was not called and update_curr() has to be done:
5141 update_curr(cfs_rq
);
5143 /* throttle cfs_rqs exceeding runtime */
5144 check_cfs_rq_runtime(cfs_rq
);
5146 check_spread(cfs_rq
, prev
);
5149 update_stats_wait_start_fair(cfs_rq
, prev
);
5150 /* Put 'current' back into the tree. */
5151 __enqueue_entity(cfs_rq
, prev
);
5152 /* in !on_rq case, update occurred at dequeue */
5153 update_load_avg(cfs_rq
, prev
, 0);
5155 cfs_rq
->curr
= NULL
;
5159 entity_tick(struct cfs_rq
*cfs_rq
, struct sched_entity
*curr
, int queued
)
5162 * Update run-time statistics of the 'current'.
5164 update_curr(cfs_rq
);
5167 * Ensure that runnable average is periodically updated.
5169 update_load_avg(cfs_rq
, curr
, UPDATE_TG
);
5170 update_cfs_group(curr
);
5172 #ifdef CONFIG_SCHED_HRTICK
5174 * queued ticks are scheduled to match the slice, so don't bother
5175 * validating it and just reschedule.
5178 resched_curr(rq_of(cfs_rq
));
5182 * don't let the period tick interfere with the hrtick preemption
5184 if (!sched_feat(DOUBLE_TICK
) &&
5185 hrtimer_active(&rq_of(cfs_rq
)->hrtick_timer
))
5189 if (cfs_rq
->nr_running
> 1)
5190 check_preempt_tick(cfs_rq
, curr
);
5194 /**************************************************
5195 * CFS bandwidth control machinery
5198 #ifdef CONFIG_CFS_BANDWIDTH
5200 #ifdef CONFIG_JUMP_LABEL
5201 static struct static_key __cfs_bandwidth_used
;
5203 static inline bool cfs_bandwidth_used(void)
5205 return static_key_false(&__cfs_bandwidth_used
);
5208 void cfs_bandwidth_usage_inc(void)
5210 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used
);
5213 void cfs_bandwidth_usage_dec(void)
5215 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used
);
5217 #else /* CONFIG_JUMP_LABEL */
5218 static bool cfs_bandwidth_used(void)
5223 void cfs_bandwidth_usage_inc(void) {}
5224 void cfs_bandwidth_usage_dec(void) {}
5225 #endif /* CONFIG_JUMP_LABEL */
 * default period for cfs group bandwidth.
 * default: 0.1s, units: nanoseconds
 */
static inline u64 default_cfs_period(void)
{
	return 100000000ULL;
}

static inline u64 sched_cfs_bandwidth_slice(void)
{
	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
}
/*
 * Replenish runtime according to assigned quota. We use sched_clock_cpu
 * directly instead of rq->clock to avoid adding additional synchronization
 * around rq->lock.
 *
 * requires cfs_b->lock
 */
void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
	s64 runtime;

	if (unlikely(cfs_b->quota == RUNTIME_INF))
		return;

	cfs_b->runtime += cfs_b->quota;
	runtime = cfs_b->runtime_snap - cfs_b->runtime;
	if (runtime > 0) {
		cfs_b->burst_time += runtime;
		cfs_b->nr_burst++;
	}

	cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
	cfs_b->runtime_snap = cfs_b->runtime;
}
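/*
 * Illustrative sketch (not kernel code) of the burst-aware refill performed by
 * __refill_cfs_bandwidth_runtime() above: each period adds one quota's worth
 * of runtime, but the accumulated balance is capped at quota + burst, so
 * unused time can only carry over up to the configured burst. The helper name
 * and its by-value interface are hypothetical.
 */
static inline u64 refill_runtime_sketch(u64 runtime, u64 quota, u64 burst)
{
	return min(runtime + quota, quota + burst);
}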
5266 static inline struct cfs_bandwidth
*tg_cfs_bandwidth(struct task_group
*tg
)
5268 return &tg
->cfs_bandwidth
;
5271 /* returns 0 on failure to allocate runtime */
5272 static int __assign_cfs_rq_runtime(struct cfs_bandwidth
*cfs_b
,
5273 struct cfs_rq
*cfs_rq
, u64 target_runtime
)
5275 u64 min_amount
, amount
= 0;
5277 lockdep_assert_held(&cfs_b
->lock
);
5279 /* note: this is a positive sum as runtime_remaining <= 0 */
5280 min_amount
= target_runtime
- cfs_rq
->runtime_remaining
;
5282 if (cfs_b
->quota
== RUNTIME_INF
)
5283 amount
= min_amount
;
5285 start_cfs_bandwidth(cfs_b
);
5287 if (cfs_b
->runtime
> 0) {
5288 amount
= min(cfs_b
->runtime
, min_amount
);
5289 cfs_b
->runtime
-= amount
;
5294 cfs_rq
->runtime_remaining
+= amount
;
5296 return cfs_rq
->runtime_remaining
> 0;
5299 /* returns 0 on failure to allocate runtime */
5300 static int assign_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
5302 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
5305 raw_spin_lock(&cfs_b
->lock
);
5306 ret
= __assign_cfs_rq_runtime(cfs_b
, cfs_rq
, sched_cfs_bandwidth_slice());
5307 raw_spin_unlock(&cfs_b
->lock
);
5312 static void __account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
, u64 delta_exec
)
5314 /* dock delta_exec before expiring quota (as it could span periods) */
5315 cfs_rq
->runtime_remaining
-= delta_exec
;
5317 if (likely(cfs_rq
->runtime_remaining
> 0))
5320 if (cfs_rq
->throttled
)
5323 * if we're unable to extend our runtime we resched so that the active
5324 * hierarchy can be throttled
5326 if (!assign_cfs_rq_runtime(cfs_rq
) && likely(cfs_rq
->curr
))
5327 resched_curr(rq_of(cfs_rq
));
5330 static __always_inline
5331 void account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
, u64 delta_exec
)
5333 if (!cfs_bandwidth_used() || !cfs_rq
->runtime_enabled
)
5336 __account_cfs_rq_runtime(cfs_rq
, delta_exec
);
5339 static inline int cfs_rq_throttled(struct cfs_rq
*cfs_rq
)
5341 return cfs_bandwidth_used() && cfs_rq
->throttled
;
5344 /* check whether cfs_rq, or any parent, is throttled */
5345 static inline int throttled_hierarchy(struct cfs_rq
*cfs_rq
)
5347 return cfs_bandwidth_used() && cfs_rq
->throttle_count
;
5351 * Ensure that neither of the group entities corresponding to src_cpu or
5352 * dest_cpu are members of a throttled hierarchy when performing group
5353 * load-balance operations.
5355 static inline int throttled_lb_pair(struct task_group
*tg
,
5356 int src_cpu
, int dest_cpu
)
5358 struct cfs_rq
*src_cfs_rq
, *dest_cfs_rq
;
5360 src_cfs_rq
= tg
->cfs_rq
[src_cpu
];
5361 dest_cfs_rq
= tg
->cfs_rq
[dest_cpu
];
5363 return throttled_hierarchy(src_cfs_rq
) ||
5364 throttled_hierarchy(dest_cfs_rq
);
5367 static int tg_unthrottle_up(struct task_group
*tg
, void *data
)
5369 struct rq
*rq
= data
;
5370 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
5372 cfs_rq
->throttle_count
--;
5373 if (!cfs_rq
->throttle_count
) {
5374 cfs_rq
->throttled_clock_pelt_time
+= rq_clock_pelt(rq
) -
5375 cfs_rq
->throttled_clock_pelt
;
5377 /* Add cfs_rq with load or one or more already running entities to the list */
5378 if (!cfs_rq_is_decayed(cfs_rq
))
5379 list_add_leaf_cfs_rq(cfs_rq
);
5385 static int tg_throttle_down(struct task_group
*tg
, void *data
)
5387 struct rq
*rq
= data
;
5388 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
5390 /* group is entering throttled state, stop time */
5391 if (!cfs_rq
->throttle_count
) {
5392 cfs_rq
->throttled_clock_pelt
= rq_clock_pelt(rq
);
5393 list_del_leaf_cfs_rq(cfs_rq
);
5395 cfs_rq
->throttle_count
++;
5400 static bool throttle_cfs_rq(struct cfs_rq
*cfs_rq
)
5402 struct rq
*rq
= rq_of(cfs_rq
);
5403 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
5404 struct sched_entity
*se
;
5405 long task_delta
, idle_task_delta
, dequeue
= 1;
5407 raw_spin_lock(&cfs_b
->lock
);
5408 /* This will start the period timer if necessary */
5409 if (__assign_cfs_rq_runtime(cfs_b
, cfs_rq
, 1)) {
5411 * We have raced with bandwidth becoming available, and if we
5412 * actually throttled the timer might not unthrottle us for an
5413 * entire period. We additionally needed to make sure that any
5414 * subsequent check_cfs_rq_runtime calls agree not to throttle
5415 * us, as we may commit to do cfs put_prev+pick_next, so we ask
5416 * for 1ns of runtime rather than just check cfs_b.
5420 list_add_tail_rcu(&cfs_rq
->throttled_list
,
5421 &cfs_b
->throttled_cfs_rq
);
5423 raw_spin_unlock(&cfs_b
->lock
);
5426 return false; /* Throttle no longer required. */
5428 se
= cfs_rq
->tg
->se
[cpu_of(rq_of(cfs_rq
))];
5430 /* freeze hierarchy runnable averages while throttled */
5432 walk_tg_tree_from(cfs_rq
->tg
, tg_throttle_down
, tg_nop
, (void *)rq
);
5435 task_delta
= cfs_rq
->h_nr_running
;
5436 idle_task_delta
= cfs_rq
->idle_h_nr_running
;
5437 for_each_sched_entity(se
) {
5438 struct cfs_rq
*qcfs_rq
= cfs_rq_of(se
);
5439 /* throttled entity or throttle-on-deactivate */
5443 dequeue_entity(qcfs_rq
, se
, DEQUEUE_SLEEP
);
5445 if (cfs_rq_is_idle(group_cfs_rq(se
)))
5446 idle_task_delta
= cfs_rq
->h_nr_running
;
5448 qcfs_rq
->h_nr_running
-= task_delta
;
5449 qcfs_rq
->idle_h_nr_running
-= idle_task_delta
;
5451 if (qcfs_rq
->load
.weight
) {
5452 /* Avoid re-evaluating load for this entity: */
5453 se
= parent_entity(se
);
5458 for_each_sched_entity(se
) {
5459 struct cfs_rq
*qcfs_rq
= cfs_rq_of(se
);
5460 /* throttled entity or throttle-on-deactivate */
5464 update_load_avg(qcfs_rq
, se
, 0);
5465 se_update_runnable(se
);
5467 if (cfs_rq_is_idle(group_cfs_rq(se
)))
5468 idle_task_delta
= cfs_rq
->h_nr_running
;
5470 qcfs_rq
->h_nr_running
-= task_delta
;
5471 qcfs_rq
->idle_h_nr_running
-= idle_task_delta
;
5474 /* At this point se is NULL and we are at root level*/
5475 sub_nr_running(rq
, task_delta
);
5479 * Note: distribution will already see us throttled via the
5480 * throttled-list. rq->lock protects completion.
5482 cfs_rq
->throttled
= 1;
5483 cfs_rq
->throttled_clock
= rq_clock(rq
);
5487 void unthrottle_cfs_rq(struct cfs_rq
*cfs_rq
)
5489 struct rq
*rq
= rq_of(cfs_rq
);
5490 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
5491 struct sched_entity
*se
;
5492 long task_delta
, idle_task_delta
;
5494 se
= cfs_rq
->tg
->se
[cpu_of(rq
)];
5496 cfs_rq
->throttled
= 0;
5498 update_rq_clock(rq
);
5500 raw_spin_lock(&cfs_b
->lock
);
5501 cfs_b
->throttled_time
+= rq_clock(rq
) - cfs_rq
->throttled_clock
;
5502 list_del_rcu(&cfs_rq
->throttled_list
);
5503 raw_spin_unlock(&cfs_b
->lock
);
5505 /* update hierarchical throttle state */
5506 walk_tg_tree_from(cfs_rq
->tg
, tg_nop
, tg_unthrottle_up
, (void *)rq
);
5508 if (!cfs_rq
->load
.weight
) {
5509 if (!cfs_rq
->on_list
)
5512 * Nothing to run but something to decay (on_list)?
5513 * Complete the branch.
5515 for_each_sched_entity(se
) {
5516 if (list_add_leaf_cfs_rq(cfs_rq_of(se
)))
5519 goto unthrottle_throttle
;
5522 task_delta
= cfs_rq
->h_nr_running
;
5523 idle_task_delta
= cfs_rq
->idle_h_nr_running
;
5524 for_each_sched_entity(se
) {
5525 struct cfs_rq
*qcfs_rq
= cfs_rq_of(se
);
5529 enqueue_entity(qcfs_rq
, se
, ENQUEUE_WAKEUP
);
5531 if (cfs_rq_is_idle(group_cfs_rq(se
)))
5532 idle_task_delta
= cfs_rq
->h_nr_running
;
5534 qcfs_rq
->h_nr_running
+= task_delta
;
5535 qcfs_rq
->idle_h_nr_running
+= idle_task_delta
;
5537 /* end evaluation on encountering a throttled cfs_rq */
5538 if (cfs_rq_throttled(qcfs_rq
))
5539 goto unthrottle_throttle
;
5542 for_each_sched_entity(se
) {
5543 struct cfs_rq
*qcfs_rq
= cfs_rq_of(se
);
5545 update_load_avg(qcfs_rq
, se
, UPDATE_TG
);
5546 se_update_runnable(se
);
5548 if (cfs_rq_is_idle(group_cfs_rq(se
)))
5549 idle_task_delta
= cfs_rq
->h_nr_running
;
5551 qcfs_rq
->h_nr_running
+= task_delta
;
5552 qcfs_rq
->idle_h_nr_running
+= idle_task_delta
;
5554 /* end evaluation on encountering a throttled cfs_rq */
5555 if (cfs_rq_throttled(qcfs_rq
))
5556 goto unthrottle_throttle
;
5559 /* At this point se is NULL and we are at root level*/
5560 add_nr_running(rq
, task_delta
);
5562 unthrottle_throttle
:
5563 assert_list_leaf_cfs_rq(rq
);
5565 /* Determine whether we need to wake up potentially idle CPU: */
5566 if (rq
->curr
== rq
->idle
&& rq
->cfs
.nr_running
)
5571 static void __cfsb_csd_unthrottle(void *arg
)
5573 struct cfs_rq
*cursor
, *tmp
;
5574 struct rq
*rq
= arg
;
5580 * Since we hold rq lock we're safe from concurrent manipulation of
5581 * the CSD list. However, this RCU critical section annotates the
5582 * fact that we pair with sched_free_group_rcu(), so that we cannot
5583 * race with group being freed in the window between removing it
5584 * from the list and advancing to the next entry in the list.
5588 list_for_each_entry_safe(cursor
, tmp
, &rq
->cfsb_csd_list
,
5589 throttled_csd_list
) {
5590 list_del_init(&cursor
->throttled_csd_list
);
5592 if (cfs_rq_throttled(cursor
))
5593 unthrottle_cfs_rq(cursor
);
5601 static inline void __unthrottle_cfs_rq_async(struct cfs_rq
*cfs_rq
)
5603 struct rq
*rq
= rq_of(cfs_rq
);
5606 if (rq
== this_rq()) {
5607 unthrottle_cfs_rq(cfs_rq
);
5611 /* Already enqueued */
5612 if (SCHED_WARN_ON(!list_empty(&cfs_rq
->throttled_csd_list
)))
5615 first
= list_empty(&rq
->cfsb_csd_list
);
5616 list_add_tail(&cfs_rq
->throttled_csd_list
, &rq
->cfsb_csd_list
);
5618 smp_call_function_single_async(cpu_of(rq
), &rq
->cfsb_csd
);
5621 static inline void __unthrottle_cfs_rq_async(struct cfs_rq
*cfs_rq
)
5623 unthrottle_cfs_rq(cfs_rq
);
5627 static void unthrottle_cfs_rq_async(struct cfs_rq
*cfs_rq
)
5629 lockdep_assert_rq_held(rq_of(cfs_rq
));
5631 if (SCHED_WARN_ON(!cfs_rq_throttled(cfs_rq
) ||
5632 cfs_rq
->runtime_remaining
<= 0))
5635 __unthrottle_cfs_rq_async(cfs_rq
);
5638 static bool distribute_cfs_runtime(struct cfs_bandwidth
*cfs_b
)
5640 struct cfs_rq
*local_unthrottle
= NULL
;
5641 int this_cpu
= smp_processor_id();
5642 u64 runtime
, remaining
= 1;
5643 bool throttled
= false;
5644 struct cfs_rq
*cfs_rq
;
5649 list_for_each_entry_rcu(cfs_rq
, &cfs_b
->throttled_cfs_rq
,
5658 rq_lock_irqsave(rq
, &rf
);
5659 if (!cfs_rq_throttled(cfs_rq
))
5663 /* Already queued for async unthrottle */
5664 if (!list_empty(&cfs_rq
->throttled_csd_list
))
5668 /* By the above checks, this should never be true */
5669 SCHED_WARN_ON(cfs_rq
->runtime_remaining
> 0);
5671 raw_spin_lock(&cfs_b
->lock
);
5672 runtime
= -cfs_rq
->runtime_remaining
+ 1;
5673 if (runtime
> cfs_b
->runtime
)
5674 runtime
= cfs_b
->runtime
;
5675 cfs_b
->runtime
-= runtime
;
5676 remaining
= cfs_b
->runtime
;
5677 raw_spin_unlock(&cfs_b
->lock
);
5679 cfs_rq
->runtime_remaining
+= runtime
;
5681 /* we check whether we're throttled above */
5682 if (cfs_rq
->runtime_remaining
> 0) {
5683 if (cpu_of(rq
) != this_cpu
||
5684 SCHED_WARN_ON(local_unthrottle
))
5685 unthrottle_cfs_rq_async(cfs_rq
);
5687 local_unthrottle
= cfs_rq
;
5693 rq_unlock_irqrestore(rq
, &rf
);
	if (local_unthrottle) {
		rq = cpu_rq(this_cpu);
		rq_lock_irqsave(rq, &rf);
		if (cfs_rq_throttled(local_unthrottle))
			unthrottle_cfs_rq(local_unthrottle);
		rq_unlock_irqrestore(rq, &rf);
	}

	return throttled;
}
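/*
 * Illustrative sketch (not kernel code) of the per-cfs_rq top-up computed in
 * distribute_cfs_runtime() above: a throttled cfs_rq has runtime_remaining <= 0,
 * so it needs -runtime_remaining + 1 ns to become runnable again, bounded by
 * whatever runtime the global bandwidth pool still has. Hypothetical helper.
 */
static inline u64 distribute_topup_sketch(s64 runtime_remaining, u64 pool_runtime)
{
	u64 wanted = (u64)(-runtime_remaining) + 1;

	return min(wanted, pool_runtime);
}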
5709 * Responsible for refilling a task_group's bandwidth and unthrottling its
5710 * cfs_rqs as appropriate. If there has been no activity within the last
5711 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
5712 * used to track this state.
5714 static int do_sched_cfs_period_timer(struct cfs_bandwidth
*cfs_b
, int overrun
, unsigned long flags
)
5718 /* no need to continue the timer with no bandwidth constraint */
5719 if (cfs_b
->quota
== RUNTIME_INF
)
5720 goto out_deactivate
;
5722 throttled
= !list_empty(&cfs_b
->throttled_cfs_rq
);
5723 cfs_b
->nr_periods
+= overrun
;
5725 /* Refill extra burst quota even if cfs_b->idle */
5726 __refill_cfs_bandwidth_runtime(cfs_b
);
5729 * idle depends on !throttled (for the case of a large deficit), and if
5730 * we're going inactive then everything else can be deferred
5732 if (cfs_b
->idle
&& !throttled
)
5733 goto out_deactivate
;
5736 /* mark as potentially idle for the upcoming period */
5741 /* account preceding periods in which throttling occurred */
5742 cfs_b
->nr_throttled
+= overrun
;
5745 * This check is repeated as we release cfs_b->lock while we unthrottle.
5747 while (throttled
&& cfs_b
->runtime
> 0) {
5748 raw_spin_unlock_irqrestore(&cfs_b
->lock
, flags
);
5749 /* we can't nest cfs_b->lock while distributing bandwidth */
5750 throttled
= distribute_cfs_runtime(cfs_b
);
5751 raw_spin_lock_irqsave(&cfs_b
->lock
, flags
);
5755 * While we are ensured activity in the period following an
5756 * unthrottle, this also covers the case in which the new bandwidth is
5757 * insufficient to cover the existing bandwidth deficit. (Forcing the
5758 * timer to remain active while there are any throttled entities.)
/* a cfs_rq won't donate quota below this amount */
static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
/* minimum remaining period time to redistribute slack quota */
static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
/* how long we wait to gather additional slack before distributing */
static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
5776 * Are we near the end of the current quota period?
5778 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
5779 * hrtimer base being cleared by hrtimer_start. In the case of
5780 * migrate_hrtimers, base is never cleared, so we are fine.
5782 static int runtime_refresh_within(struct cfs_bandwidth
*cfs_b
, u64 min_expire
)
5784 struct hrtimer
*refresh_timer
= &cfs_b
->period_timer
;
5787 /* if the call-back is running a quota refresh is already occurring */
5788 if (hrtimer_callback_running(refresh_timer
))
5791 /* is a quota refresh about to occur? */
5792 remaining
= ktime_to_ns(hrtimer_expires_remaining(refresh_timer
));
5793 if (remaining
< (s64
)min_expire
)
5799 static void start_cfs_slack_bandwidth(struct cfs_bandwidth
*cfs_b
)
5801 u64 min_left
= cfs_bandwidth_slack_period
+ min_bandwidth_expiration
;
5803 /* if there's a quota refresh soon don't bother with slack */
5804 if (runtime_refresh_within(cfs_b
, min_left
))
5807 /* don't push forwards an existing deferred unthrottle */
5808 if (cfs_b
->slack_started
)
5810 cfs_b
->slack_started
= true;
5812 hrtimer_start(&cfs_b
->slack_timer
,
5813 ns_to_ktime(cfs_bandwidth_slack_period
),
5817 /* we know any runtime found here is valid as update_curr() precedes return */
5818 static void __return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
5820 struct cfs_bandwidth
*cfs_b
= tg_cfs_bandwidth(cfs_rq
->tg
);
5821 s64 slack_runtime
= cfs_rq
->runtime_remaining
- min_cfs_rq_runtime
;
5823 if (slack_runtime
<= 0)
5826 raw_spin_lock(&cfs_b
->lock
);
5827 if (cfs_b
->quota
!= RUNTIME_INF
) {
5828 cfs_b
->runtime
+= slack_runtime
;
5830 /* we are under rq->lock, defer unthrottling using a timer */
5831 if (cfs_b
->runtime
> sched_cfs_bandwidth_slice() &&
5832 !list_empty(&cfs_b
->throttled_cfs_rq
))
5833 start_cfs_slack_bandwidth(cfs_b
);
5835 raw_spin_unlock(&cfs_b
->lock
);
5837 /* even if it's not valid for return we don't want to try again */
5838 cfs_rq
->runtime_remaining
-= slack_runtime
;
5841 static __always_inline
void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
5843 if (!cfs_bandwidth_used())
5846 if (!cfs_rq
->runtime_enabled
|| cfs_rq
->nr_running
)
5849 __return_cfs_rq_runtime(cfs_rq
);
5853 * This is done with a timer (instead of inline with bandwidth return) since
5854 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
5856 static void do_sched_cfs_slack_timer(struct cfs_bandwidth
*cfs_b
)
5858 u64 runtime
= 0, slice
= sched_cfs_bandwidth_slice();
5859 unsigned long flags
;
5861 /* confirm we're still not at a refresh boundary */
5862 raw_spin_lock_irqsave(&cfs_b
->lock
, flags
);
5863 cfs_b
->slack_started
= false;
5865 if (runtime_refresh_within(cfs_b
, min_bandwidth_expiration
)) {
5866 raw_spin_unlock_irqrestore(&cfs_b
->lock
, flags
);
5870 if (cfs_b
->quota
!= RUNTIME_INF
&& cfs_b
->runtime
> slice
)
5871 runtime
= cfs_b
->runtime
;
5873 raw_spin_unlock_irqrestore(&cfs_b
->lock
, flags
);
5878 distribute_cfs_runtime(cfs_b
);
5882 * When a group wakes up we want to make sure that its quota is not already
5883 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
5884 * runtime as update_curr() throttling can not trigger until it's on-rq.
5886 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
)
5888 if (!cfs_bandwidth_used())
5891 /* an active group must be handled by the update_curr()->put() path */
5892 if (!cfs_rq
->runtime_enabled
|| cfs_rq
->curr
)
5895 /* ensure the group is not already throttled */
5896 if (cfs_rq_throttled(cfs_rq
))
5899 /* update runtime allocation */
5900 account_cfs_rq_runtime(cfs_rq
, 0);
5901 if (cfs_rq
->runtime_remaining
<= 0)
5902 throttle_cfs_rq(cfs_rq
);
5905 static void sync_throttle(struct task_group
*tg
, int cpu
)
5907 struct cfs_rq
*pcfs_rq
, *cfs_rq
;
5909 if (!cfs_bandwidth_used())
5915 cfs_rq
= tg
->cfs_rq
[cpu
];
5916 pcfs_rq
= tg
->parent
->cfs_rq
[cpu
];
5918 cfs_rq
->throttle_count
= pcfs_rq
->throttle_count
;
5919 cfs_rq
->throttled_clock_pelt
= rq_clock_pelt(cpu_rq(cpu
));
5922 /* conditionally throttle active cfs_rq's from put_prev_entity() */
5923 static bool check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
5925 if (!cfs_bandwidth_used())
5928 if (likely(!cfs_rq
->runtime_enabled
|| cfs_rq
->runtime_remaining
> 0))
5932 * it's possible for a throttled entity to be forced into a running
5933 * state (e.g. set_curr_task), in this case we're finished.
5935 if (cfs_rq_throttled(cfs_rq
))
5938 return throttle_cfs_rq(cfs_rq
);
5941 static enum hrtimer_restart
sched_cfs_slack_timer(struct hrtimer
*timer
)
5943 struct cfs_bandwidth
*cfs_b
=
5944 container_of(timer
, struct cfs_bandwidth
, slack_timer
);
5946 do_sched_cfs_slack_timer(cfs_b
);
5948 return HRTIMER_NORESTART
;
5951 extern const u64 max_cfs_quota_period
;
5953 static enum hrtimer_restart
sched_cfs_period_timer(struct hrtimer
*timer
)
5955 struct cfs_bandwidth
*cfs_b
=
5956 container_of(timer
, struct cfs_bandwidth
, period_timer
);
5957 unsigned long flags
;
5962 raw_spin_lock_irqsave(&cfs_b
->lock
, flags
);
5964 overrun
= hrtimer_forward_now(timer
, cfs_b
->period
);
5968 idle
= do_sched_cfs_period_timer(cfs_b
, overrun
, flags
);
5971 u64
new, old
= ktime_to_ns(cfs_b
->period
);
5974 * Grow period by a factor of 2 to avoid losing precision.
5975 * Precision loss in the quota/period ratio can cause __cfs_schedulable
5979 if (new < max_cfs_quota_period
) {
5980 cfs_b
->period
= ns_to_ktime(new);
5984 pr_warn_ratelimited(
5985 "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
5987 div_u64(new, NSEC_PER_USEC
),
5988 div_u64(cfs_b
->quota
, NSEC_PER_USEC
));
5990 pr_warn_ratelimited(
5991 "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
5993 div_u64(old
, NSEC_PER_USEC
),
5994 div_u64(cfs_b
->quota
, NSEC_PER_USEC
));
5997 /* reset count so we don't come right back in here */
6002 cfs_b
->period_active
= 0;
6003 raw_spin_unlock_irqrestore(&cfs_b
->lock
, flags
);
	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
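/*
 * Illustrative sketch (not kernel code) of the scaling fallback used by
 * sched_cfs_period_timer() above when the period keeps firing back-to-back:
 * both period and quota are doubled so their ratio (and thus the configured
 * bandwidth) is preserved while the timer fires half as often. Names are
 * hypothetical; the real code also scales the burst and checks
 * max_cfs_quota_period before applying the new period.
 */
static inline void cfs_period_grow_sketch(u64 *period_ns, u64 *quota_ns)
{
	*period_ns *= 2;
	*quota_ns  *= 2;
}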
6008 void init_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
6010 raw_spin_lock_init(&cfs_b
->lock
);
6012 cfs_b
->quota
= RUNTIME_INF
;
6013 cfs_b
->period
= ns_to_ktime(default_cfs_period());
6016 INIT_LIST_HEAD(&cfs_b
->throttled_cfs_rq
);
6017 hrtimer_init(&cfs_b
->period_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_ABS_PINNED
);
6018 cfs_b
->period_timer
.function
= sched_cfs_period_timer
;
6019 hrtimer_init(&cfs_b
->slack_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
6020 cfs_b
->slack_timer
.function
= sched_cfs_slack_timer
;
6021 cfs_b
->slack_started
= false;
6024 static void init_cfs_rq_runtime(struct cfs_rq
*cfs_rq
)
6026 cfs_rq
->runtime_enabled
= 0;
6027 INIT_LIST_HEAD(&cfs_rq
->throttled_list
);
6029 INIT_LIST_HEAD(&cfs_rq
->throttled_csd_list
);
6033 void start_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
6035 lockdep_assert_held(&cfs_b
->lock
);
6037 if (cfs_b
->period_active
)
6040 cfs_b
->period_active
= 1;
6041 hrtimer_forward_now(&cfs_b
->period_timer
, cfs_b
->period
);
6042 hrtimer_start_expires(&cfs_b
->period_timer
, HRTIMER_MODE_ABS_PINNED
);
6045 static void destroy_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
)
6047 int __maybe_unused i
;
6049 /* init_cfs_bandwidth() was not called */
6050 if (!cfs_b
->throttled_cfs_rq
.next
)
6053 hrtimer_cancel(&cfs_b
->period_timer
);
6054 hrtimer_cancel(&cfs_b
->slack_timer
);
6057 * It is possible that we still have some cfs_rq's pending on a CSD
6058 * list, though this race is very rare. In order for this to occur, we
6059 * must have raced with the last task leaving the group while there
6060 * exist throttled cfs_rq(s), and the period_timer must have queued the
6061 * CSD item but the remote cpu has not yet processed it. To handle this,
6062 * we can simply flush all pending CSD work inline here. We're
6063 * guaranteed at this point that no additional cfs_rq of this group can
6067 for_each_possible_cpu(i
) {
6068 struct rq
*rq
= cpu_rq(i
);
6069 unsigned long flags
;
6071 if (list_empty(&rq
->cfsb_csd_list
))
6074 local_irq_save(flags
);
6075 __cfsb_csd_unthrottle(rq
);
6076 local_irq_restore(flags
);
6082 * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
6084 * The race is harmless, since modifying bandwidth settings of unhooked group
6085 * bits doesn't do much.
6088 /* cpu online callback */
6089 static void __maybe_unused
update_runtime_enabled(struct rq
*rq
)
6091 struct task_group
*tg
;
6093 lockdep_assert_rq_held(rq
);
6096 list_for_each_entry_rcu(tg
, &task_groups
, list
) {
6097 struct cfs_bandwidth
*cfs_b
= &tg
->cfs_bandwidth
;
6098 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
6100 raw_spin_lock(&cfs_b
->lock
);
6101 cfs_rq
->runtime_enabled
= cfs_b
->quota
!= RUNTIME_INF
;
6102 raw_spin_unlock(&cfs_b
->lock
);
6107 /* cpu offline callback */
6108 static void __maybe_unused
unthrottle_offline_cfs_rqs(struct rq
*rq
)
6110 struct task_group
*tg
;
6112 lockdep_assert_rq_held(rq
);
6115 list_for_each_entry_rcu(tg
, &task_groups
, list
) {
6116 struct cfs_rq
*cfs_rq
= tg
->cfs_rq
[cpu_of(rq
)];
6118 if (!cfs_rq
->runtime_enabled
)
6122 * clock_task is not advancing so we just need to make sure
6123 * there's some valid quota amount
6125 cfs_rq
->runtime_remaining
= 1;
6127 * Offline rq is schedulable till CPU is completely disabled
6128 * in take_cpu_down(), so we prevent new cfs throttling here.
6130 cfs_rq
->runtime_enabled
= 0;
6132 if (cfs_rq_throttled(cfs_rq
))
6133 unthrottle_cfs_rq(cfs_rq
);
6138 #else /* CONFIG_CFS_BANDWIDTH */
6140 static inline bool cfs_bandwidth_used(void)
6145 static void account_cfs_rq_runtime(struct cfs_rq
*cfs_rq
, u64 delta_exec
) {}
6146 static bool check_cfs_rq_runtime(struct cfs_rq
*cfs_rq
) { return false; }
6147 static void check_enqueue_throttle(struct cfs_rq
*cfs_rq
) {}
6148 static inline void sync_throttle(struct task_group
*tg
, int cpu
) {}
6149 static __always_inline
void return_cfs_rq_runtime(struct cfs_rq
*cfs_rq
) {}
6151 static inline int cfs_rq_throttled(struct cfs_rq
*cfs_rq
)
6156 static inline int throttled_hierarchy(struct cfs_rq
*cfs_rq
)
6161 static inline int throttled_lb_pair(struct task_group
*tg
,
6162 int src_cpu
, int dest_cpu
)
6167 void init_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
) {}
6169 #ifdef CONFIG_FAIR_GROUP_SCHED
6170 static void init_cfs_rq_runtime(struct cfs_rq
*cfs_rq
) {}
6173 static inline struct cfs_bandwidth
*tg_cfs_bandwidth(struct task_group
*tg
)
6177 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth
*cfs_b
) {}
6178 static inline void update_runtime_enabled(struct rq
*rq
) {}
6179 static inline void unthrottle_offline_cfs_rqs(struct rq
*rq
) {}
6181 #endif /* CONFIG_CFS_BANDWIDTH */
6183 /**************************************************
6184 * CFS operations on tasks:
6187 #ifdef CONFIG_SCHED_HRTICK
6188 static void hrtick_start_fair(struct rq
*rq
, struct task_struct
*p
)
6190 struct sched_entity
*se
= &p
->se
;
6191 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
6193 SCHED_WARN_ON(task_rq(p
) != rq
);
6195 if (rq
->cfs
.h_nr_running
> 1) {
6196 u64 slice
= sched_slice(cfs_rq
, se
);
6197 u64 ran
= se
->sum_exec_runtime
- se
->prev_sum_exec_runtime
;
6198 s64 delta
= slice
- ran
;
6201 if (task_current(rq
, p
))
6205 hrtick_start(rq
, delta
);
6210 * called from enqueue/dequeue and updates the hrtick when the
6211 * current task is from our class and nr_running is low enough
6214 static void hrtick_update(struct rq
*rq
)
6216 struct task_struct
*curr
= rq
->curr
;
6218 if (!hrtick_enabled_fair(rq
) || curr
->sched_class
!= &fair_sched_class
)
6221 if (cfs_rq_of(&curr
->se
)->nr_running
< sched_nr_latency
)
6222 hrtick_start_fair(rq
, curr
);
6224 #else /* !CONFIG_SCHED_HRTICK */
6226 hrtick_start_fair(struct rq
*rq
, struct task_struct
*p
)
6230 static inline void hrtick_update(struct rq
*rq
)
static inline bool cpu_overutilized(int cpu)
{
	unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
	unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);

	/* Return true only if the utilization doesn't fit CPU's capacity */
	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
}
6245 static inline void update_overutilized_status(struct rq
*rq
)
6247 if (!READ_ONCE(rq
->rd
->overutilized
) && cpu_overutilized(rq
->cpu
)) {
6248 WRITE_ONCE(rq
->rd
->overutilized
, SG_OVERUTILIZED
);
6249 trace_sched_overutilized_tp(rq
->rd
, SG_OVERUTILIZED
);
6253 static inline void update_overutilized_status(struct rq
*rq
) { }
/* Runqueue only has SCHED_IDLE tasks enqueued */
static int sched_idle_rq(struct rq *rq)
{
	return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
			rq->nr_running);
}

/*
 * Returns true if cfs_rq only has SCHED_IDLE entities enqueued. Note the use
 * of idle_nr_running, which does not consider idle descendants of normal
 * entities.
 */
static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq)
{
	return cfs_rq->nr_running &&
		cfs_rq->nr_running == cfs_rq->idle_nr_running;
}

static int sched_idle_cpu(int cpu)
{
	return sched_idle_rq(cpu_rq(cpu));
}
6282 * The enqueue_task method is called before nr_running is
6283 * increased. Here we update the fair scheduling stats and
6284 * then put the task into the rbtree:
6287 enqueue_task_fair(struct rq
*rq
, struct task_struct
*p
, int flags
)
6289 struct cfs_rq
*cfs_rq
;
6290 struct sched_entity
*se
= &p
->se
;
6291 int idle_h_nr_running
= task_has_idle_policy(p
);
6292 int task_new
= !(flags
& ENQUEUE_WAKEUP
);
6295 * The code below (indirectly) updates schedutil which looks at
6296 * the cfs_rq utilization to select a frequency.
6297 * Let's add the task's estimated utilization to the cfs_rq's
6298 * estimated utilization, before we update schedutil.
6300 util_est_enqueue(&rq
->cfs
, p
);
6303 * If in_iowait is set, the code below may not trigger any cpufreq
6304 * utilization updates, so do it here explicitly with the IOWAIT flag
6308 cpufreq_update_util(rq
, SCHED_CPUFREQ_IOWAIT
);
6310 for_each_sched_entity(se
) {
6313 cfs_rq
= cfs_rq_of(se
);
6314 enqueue_entity(cfs_rq
, se
, flags
);
6316 cfs_rq
->h_nr_running
++;
6317 cfs_rq
->idle_h_nr_running
+= idle_h_nr_running
;
6319 if (cfs_rq_is_idle(cfs_rq
))
6320 idle_h_nr_running
= 1;
6322 /* end evaluation on encountering a throttled cfs_rq */
6323 if (cfs_rq_throttled(cfs_rq
))
6324 goto enqueue_throttle
;
6326 flags
= ENQUEUE_WAKEUP
;
6329 for_each_sched_entity(se
) {
6330 cfs_rq
= cfs_rq_of(se
);
6332 update_load_avg(cfs_rq
, se
, UPDATE_TG
);
6333 se_update_runnable(se
);
6334 update_cfs_group(se
);
6336 cfs_rq
->h_nr_running
++;
6337 cfs_rq
->idle_h_nr_running
+= idle_h_nr_running
;
6339 if (cfs_rq_is_idle(cfs_rq
))
6340 idle_h_nr_running
= 1;
6342 /* end evaluation on encountering a throttled cfs_rq */
6343 if (cfs_rq_throttled(cfs_rq
))
6344 goto enqueue_throttle
;
6347 /* At this point se is NULL and we are at root level*/
6348 add_nr_running(rq
, 1);
6351 * Since new tasks are assigned an initial util_avg equal to
6352 * half of the spare capacity of their CPU, tiny tasks have the
6353 * ability to cross the overutilized threshold, which will
6354 * result in the load balancer ruining all the task placement
6355 * done by EAS. As a way to mitigate that effect, do not account
6356 * for the first enqueue operation of new tasks during the
6357 * overutilized flag detection.
6359 * A better way of solving this problem would be to wait for
6360 * the PELT signals of tasks to converge before taking them
6361 * into account, but that is not straightforward to implement,
6362 * and the following generally works well enough in practice.
6365 update_overutilized_status(rq
);
6368 assert_list_leaf_cfs_rq(rq
);
static void set_next_buddy(struct sched_entity *se);

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	int task_sleep = flags & DEQUEUE_SLEEP;
	int idle_h_nr_running = task_has_idle_policy(p);
	bool was_sched_idle = sched_idle_rq(rq);

	util_est_dequeue(&rq->cfs, p);

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, flags);

		cfs_rq->h_nr_running--;
		cfs_rq->idle_h_nr_running -= idle_h_nr_running;

		if (cfs_rq_is_idle(cfs_rq))
			idle_h_nr_running = 1;

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
			goto dequeue_throttle;

		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			/* Avoid re-evaluating load for this entity: */
			se = parent_entity(se);
			/*
			 * Bias pick_next to pick a task from this cfs_rq, as
			 * p is sleeping when it is within its sched_slice.
			 */
			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
				set_next_buddy(se);
			break;
		}
		flags |= DEQUEUE_SLEEP;
	}

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);

		update_load_avg(cfs_rq, se, UPDATE_TG);
		se_update_runnable(se);
		update_cfs_group(se);

		cfs_rq->h_nr_running--;
		cfs_rq->idle_h_nr_running -= idle_h_nr_running;

		if (cfs_rq_is_idle(cfs_rq))
			idle_h_nr_running = 1;

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
			goto dequeue_throttle;
	}

	/* At this point se is NULL and we are at root level */
	sub_nr_running(rq, 1);

	/* balance early to pull high priority tasks */
	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
		rq->next_balance = jiffies;

dequeue_throttle:
	util_est_update(&rq->cfs, p, task_sleep);
}
/* Working cpumask for: load_balance, load_balance_newidle. */
static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);

#ifdef CONFIG_NO_HZ_COMMON

static struct {
	cpumask_var_t idle_cpus_mask;
	atomic_t nr_cpus;
	int has_blocked;		/* Idle CPUs have blocked load */
	int needs_update;		/* Newly idle CPUs need their next_balance collated */
	unsigned long next_balance;	/* in jiffy units */
	unsigned long next_blocked;	/* Next update of blocked load in jiffies */
} nohz ____cacheline_aligned;

#endif /* CONFIG_NO_HZ_COMMON */

static unsigned long cpu_load(struct rq *rq)
{
	return cfs_rq_load_avg(&rq->cfs);
}
/*
 * cpu_load_without - compute CPU load without any contributions from *p
 * @rq: the runqueue whose CPU load is requested
 * @p: the task whose load should be discounted
 *
 * The load of a CPU is defined by the load of tasks currently enqueued on that
 * CPU as well as tasks which are currently sleeping after an execution on that
 * CPU.
 *
 * This method returns the load of the specified CPU by discounting the load of
 * the specified task, whenever the task is currently contributing to the CPU
 * load.
 */
static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq;
	unsigned long load;

	/* Task has no contribution or is new */
	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
		return cpu_load(rq);

	cfs_rq = &rq->cfs;
	load = READ_ONCE(cfs_rq->avg.load_avg);

	/* Discount task's load from CPU's load */
	lsub_positive(&load, task_h_load(p));

	return load;
}

static unsigned long cpu_runnable(struct rq *rq)
{
	return cfs_rq_runnable_avg(&rq->cfs);
}

static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq;
	unsigned int runnable;

	/* Task has no contribution or is new */
	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
		return cpu_runnable(rq);

	cfs_rq = &rq->cfs;
	runnable = READ_ONCE(cfs_rq->avg.runnable_avg);

	/* Discount task's runnable from CPU's runnable */
	lsub_positive(&runnable, p->se.avg.runnable_avg);

	return runnable;
}

static unsigned long capacity_of(int cpu)
{
	return cpu_rq(cpu)->cpu_capacity;
}
static void record_wakee(struct task_struct *p)
{
	/*
	 * Only decay a single time; tasks that have less than 1 wakeup per
	 * jiffy will not have built up many flips.
	 */
	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
		current->wakee_flips >>= 1;
		current->wakee_flip_decay_ts = jiffies;
	}

	if (current->last_wakee != p) {
		current->last_wakee = p;
		current->wakee_flips++;
	}
}

/*
 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
 *
 * A waker of many should wake a different task than the one last awakened
 * at a frequency roughly N times higher than one of its wakees.
 *
 * In order to determine whether we should let the load spread vs consolidating
 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
 * partner, and a factor of llc_size higher frequency in the other.
 *
 * With both conditions met, we can be relatively sure that the relationship is
 * non-monogamous, with partner count exceeding socket size.
 *
 * Waker/wakee being client/server, worker/dispatcher, interrupt source or
 * whatever is irrelevant; the spread criterion is simply that the apparent
 * partner count exceeds the socket size.
 */
static int wake_wide(struct task_struct *p)
{
	unsigned int master = current->wakee_flips;
	unsigned int slave = p->wakee_flips;
	int factor = __this_cpu_read(sd_llc_size);

	if (master < slave)
		swap(master, slave);
	if (slave < factor || master < slave * factor)
		return 0;
	return 1;
}
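/*
 * Illustrative example (editor's sketch, numbers assumed): with
 * sd_llc_size = 4, a dispatcher whose wakee_flips is 40 waking a worker
 * whose wakee_flips is 6 yields master = 40, slave = 6 after the
 * conditional swap. slave >= factor and master >= slave * factor
 * (40 >= 24), so wake_wide() returns 1: the wakeup is treated as "wide",
 * the affine fast path is skipped and the load is allowed to spread
 * beyond the waker's LLC.
 */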
/*
 * The purpose of wake_affine() is to quickly determine on which CPU we can run
 * soonest. For the purpose of speed we only consider the waking and previous
 * CPUs.
 *
 * wake_affine_idle() - only considers 'now', it checks if the waking CPU is
 *			cache-affine and is (or will be) idle.
 *
 * wake_affine_weight() - considers the weight to reflect the average
 *			  scheduling latency of the CPUs. This seems to work
 *			  for the overloaded case.
 */
static int
wake_affine_idle(int this_cpu, int prev_cpu, int sync)
{
	/*
	 * If this_cpu is idle, it implies the wakeup is from interrupt
	 * context. Only allow the move if cache is shared. Otherwise an
	 * interrupt intensive workload could force all tasks onto one
	 * node depending on the IO topology or IRQ affinity settings.
	 *
	 * If the prev_cpu is idle and cache affine then avoid a migration.
	 * There is no guarantee that the cache hot data from an interrupt
	 * is more important than cache hot data on the prev_cpu and from
	 * a cpufreq perspective, it's better to have higher utilisation
	 * on one CPU.
	 */
	if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
		return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;

	if (sync && cpu_rq(this_cpu)->nr_running == 1)
		return this_cpu;

	if (available_idle_cpu(prev_cpu))
		return prev_cpu;

	return nr_cpumask_bits;
}
static int
wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
		   int this_cpu, int prev_cpu, int sync)
{
	s64 this_eff_load, prev_eff_load;
	unsigned long task_load;

	this_eff_load = cpu_load(cpu_rq(this_cpu));

	if (sync) {
		unsigned long current_load = task_h_load(current);

		if (current_load > this_eff_load)
			return this_cpu;

		this_eff_load -= current_load;
	}

	task_load = task_h_load(p);

	this_eff_load += task_load;
	if (sched_feat(WA_BIAS))
		this_eff_load *= 100;
	this_eff_load *= capacity_of(prev_cpu);

	prev_eff_load = cpu_load(cpu_rq(prev_cpu));
	prev_eff_load -= task_load;
	if (sched_feat(WA_BIAS))
		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
	prev_eff_load *= capacity_of(this_cpu);

	/*
	 * If sync, adjust the weight of prev_eff_load such that if
	 * prev_eff == this_eff that select_idle_sibling() will consider
	 * stacking the wakee on top of the waker if no other CPU is
	 * idler.
	 */
	if (sync)
		prev_eff_load += 1;

	return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
}
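/*
 * Illustrative example (editor's sketch, all numbers assumed): with
 * WA_BIAS enabled, imbalance_pct = 117 and equal CPU capacities of 1024,
 * a waking CPU with load 300, a previous CPU with load 600 and a task
 * whose task_h_load() is 100 (non-sync wakeup) give:
 *
 *   this_eff_load = (300 + 100) * 100       * 1024 = 40960000
 *   prev_eff_load = (600 - 100) * (100 + 8) * 1024 = 55296000
 *
 * this_eff_load < prev_eff_load, so this_cpu is returned and the wakeup
 * is pulled towards the waker; otherwise nr_cpumask_bits would signal
 * "no preference" back to wake_affine().
 */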
static int wake_affine(struct sched_domain *sd, struct task_struct *p,
		       int this_cpu, int prev_cpu, int sync)
{
	int target = nr_cpumask_bits;

	if (sched_feat(WA_IDLE))
		target = wake_affine_idle(this_cpu, prev_cpu, sync);

	if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);

	schedstat_inc(p->stats.nr_wakeups_affine_attempts);
	if (target == nr_cpumask_bits)
		return prev_cpu;

	schedstat_inc(sd->ttwu_move_affine);
	schedstat_inc(p->stats.nr_wakeups_affine);
	return target;
}

static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
/*
 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
 */
static int
find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	unsigned int min_exit_latency = UINT_MAX;
	u64 latest_idle_timestamp = 0;
	int least_loaded_cpu = this_cpu;
	int shallowest_idle_cpu = -1;
	int i;

	/* Check if we have any choice: */
	if (group->group_weight == 1)
		return cpumask_first(sched_group_span(group));

	/* Traverse only the allowed CPUs */
	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
		struct rq *rq = cpu_rq(i);

		if (!sched_core_cookie_match(rq, p))
			continue;

		if (sched_idle_cpu(i))
			return i;

		if (available_idle_cpu(i)) {
			struct cpuidle_state *idle = idle_get_state(rq);
			if (idle && idle->exit_latency < min_exit_latency) {
				/*
				 * We give priority to a CPU whose idle state
				 * has the smallest exit latency irrespective
				 * of any idle timestamp.
				 */
				min_exit_latency = idle->exit_latency;
				latest_idle_timestamp = rq->idle_stamp;
				shallowest_idle_cpu = i;
			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
				   rq->idle_stamp > latest_idle_timestamp) {
				/*
				 * If equal or no active idle state, then
				 * the most recently idled CPU might have
				 * a warmer cache.
				 */
				latest_idle_timestamp = rq->idle_stamp;
				shallowest_idle_cpu = i;
			}
		} else if (shallowest_idle_cpu == -1) {
			load = cpu_load(cpu_rq(i));
			if (load < min_load) {
				min_load = load;
				least_loaded_cpu = i;
			}
		}
	}

	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}
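/*
 * Illustrative example (editor's sketch, numbers assumed): if the group
 * contains CPU2 idling in a shallow C-state (exit_latency 10us), CPU5
 * idling in a deep C-state (exit_latency 100us) and a busy CPU7 with
 * load 350, find_idlest_group_cpu() picks CPU2. Only when no allowed CPU
 * in the group is idle does the least-loaded busy CPU win.
 */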
6745 static inline int find_idlest_cpu(struct sched_domain
*sd
, struct task_struct
*p
,
6746 int cpu
, int prev_cpu
, int sd_flag
)
6750 if (!cpumask_intersects(sched_domain_span(sd
), p
->cpus_ptr
))
6754 * We need task's util for cpu_util_without, sync it up to
6755 * prev_cpu's last_update_time.
6757 if (!(sd_flag
& SD_BALANCE_FORK
))
6758 sync_entity_load_avg(&p
->se
);
6761 struct sched_group
*group
;
6762 struct sched_domain
*tmp
;
6765 if (!(sd
->flags
& sd_flag
)) {
6770 group
= find_idlest_group(sd
, p
, cpu
);
6776 new_cpu
= find_idlest_group_cpu(group
, p
, cpu
);
6777 if (new_cpu
== cpu
) {
6778 /* Now try balancing at a lower domain level of 'cpu': */
6783 /* Now try balancing at a lower domain level of 'new_cpu': */
6785 weight
= sd
->span_weight
;
6787 for_each_domain(cpu
, tmp
) {
6788 if (weight
<= tmp
->span_weight
)
6790 if (tmp
->flags
& sd_flag
)
6798 static inline int __select_idle_cpu(int cpu
, struct task_struct
*p
)
6800 if ((available_idle_cpu(cpu
) || sched_idle_cpu(cpu
)) &&
6801 sched_cpu_cookie_match(cpu_rq(cpu
), p
))
6807 #ifdef CONFIG_SCHED_SMT
6808 DEFINE_STATIC_KEY_FALSE(sched_smt_present
);
6809 EXPORT_SYMBOL_GPL(sched_smt_present
);
6811 static inline void set_idle_cores(int cpu
, int val
)
6813 struct sched_domain_shared
*sds
;
6815 sds
= rcu_dereference(per_cpu(sd_llc_shared
, cpu
));
6817 WRITE_ONCE(sds
->has_idle_cores
, val
);
6820 static inline bool test_idle_cores(int cpu
)
6822 struct sched_domain_shared
*sds
;
6824 sds
= rcu_dereference(per_cpu(sd_llc_shared
, cpu
));
6826 return READ_ONCE(sds
->has_idle_cores
);
6832 * Scans the local SMT mask to see if the entire core is idle, and records this
6833 * information in sd_llc_shared->has_idle_cores.
6835 * Since SMT siblings share all cache levels, inspecting this limited remote
6836 * state should be fairly cheap.
6838 void __update_idle_core(struct rq
*rq
)
6840 int core
= cpu_of(rq
);
6844 if (test_idle_cores(core
))
6847 for_each_cpu(cpu
, cpu_smt_mask(core
)) {
6851 if (!available_idle_cpu(cpu
))
6855 set_idle_cores(core
, 1);
6861 * Scan the entire LLC domain for idle cores; this dynamically switches off if
6862 * there are no idle cores left in the system; tracked through
6863 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
6865 static int select_idle_core(struct task_struct
*p
, int core
, struct cpumask
*cpus
, int *idle_cpu
)
6870 for_each_cpu(cpu
, cpu_smt_mask(core
)) {
6871 if (!available_idle_cpu(cpu
)) {
6873 if (*idle_cpu
== -1) {
6874 if (sched_idle_cpu(cpu
) && cpumask_test_cpu(cpu
, p
->cpus_ptr
)) {
6882 if (*idle_cpu
== -1 && cpumask_test_cpu(cpu
, p
->cpus_ptr
))
6889 cpumask_andnot(cpus
, cpus
, cpu_smt_mask(core
));
6894 * Scan the local SMT mask for idle CPUs.
6896 static int select_idle_smt(struct task_struct
*p
, int target
)
6900 for_each_cpu_and(cpu
, cpu_smt_mask(target
), p
->cpus_ptr
) {
6903 if (available_idle_cpu(cpu
) || sched_idle_cpu(cpu
))
6910 #else /* CONFIG_SCHED_SMT */
6912 static inline void set_idle_cores(int cpu
, int val
)
6916 static inline bool test_idle_cores(int cpu
)
6921 static inline int select_idle_core(struct task_struct
*p
, int core
, struct cpumask
*cpus
, int *idle_cpu
)
6923 return __select_idle_cpu(core
, p
);
6926 static inline int select_idle_smt(struct task_struct
*p
, int target
)
6931 #endif /* CONFIG_SCHED_SMT */
6934 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
6935 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
6936 * average idle time for this rq (as found in rq->avg_idle).
6938 static int select_idle_cpu(struct task_struct
*p
, struct sched_domain
*sd
, bool has_idle_core
, int target
)
6940 struct cpumask
*cpus
= this_cpu_cpumask_var_ptr(select_rq_mask
);
6941 int i
, cpu
, idle_cpu
= -1, nr
= INT_MAX
;
6942 struct sched_domain_shared
*sd_share
;
6943 struct rq
*this_rq
= this_rq();
6944 int this = smp_processor_id();
6945 struct sched_domain
*this_sd
= NULL
;
6948 cpumask_and(cpus
, sched_domain_span(sd
), p
->cpus_ptr
);
6950 if (sched_feat(SIS_PROP
) && !has_idle_core
) {
6951 u64 avg_cost
, avg_idle
, span_avg
;
6952 unsigned long now
= jiffies
;
6954 this_sd
= rcu_dereference(*this_cpu_ptr(&sd_llc
));
6959 * If we're busy, the assumption that the last idle period
6960 * predicts the future is flawed; age away the remaining
6961 * predicted idle time.
6963 if (unlikely(this_rq
->wake_stamp
< now
)) {
6964 while (this_rq
->wake_stamp
< now
&& this_rq
->wake_avg_idle
) {
6965 this_rq
->wake_stamp
++;
6966 this_rq
->wake_avg_idle
>>= 1;
6970 avg_idle
= this_rq
->wake_avg_idle
;
6971 avg_cost
= this_sd
->avg_scan_cost
+ 1;
6973 span_avg
= sd
->span_weight
* avg_idle
;
6974 if (span_avg
> 4*avg_cost
)
6975 nr
= div_u64(span_avg
, avg_cost
);
6979 time
= cpu_clock(this);
6982 if (sched_feat(SIS_UTIL
)) {
6983 sd_share
= rcu_dereference(per_cpu(sd_llc_shared
, target
));
6985 /* because !--nr is the condition to stop scan */
6986 nr
= READ_ONCE(sd_share
->nr_idle_scan
) + 1;
6987 /* overloaded LLC is unlikely to have idle cpu/core */
6993 for_each_cpu_wrap(cpu
, cpus
, target
+ 1) {
6994 if (has_idle_core
) {
6995 i
= select_idle_core(p
, cpu
, cpus
, &idle_cpu
);
6996 if ((unsigned int)i
< nr_cpumask_bits
)
7002 idle_cpu
= __select_idle_cpu(cpu
, p
);
7003 if ((unsigned int)idle_cpu
< nr_cpumask_bits
)
7009 set_idle_cores(target
, false);
7011 if (sched_feat(SIS_PROP
) && this_sd
&& !has_idle_core
) {
7012 time
= cpu_clock(this) - time
;
7015 * Account for the scan cost of wakeups against the average
7018 this_rq
->wake_avg_idle
-= min(this_rq
->wake_avg_idle
, time
);
7020 update_avg(&this_sd
->avg_scan_cost
, time
);
7027 * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
7028 * the task fits. If no CPU is big enough, but there are idle ones, try to
7029 * maximize capacity.
7032 select_idle_capacity(struct task_struct
*p
, struct sched_domain
*sd
, int target
)
7034 unsigned long task_util
, util_min
, util_max
, best_cap
= 0;
7035 int fits
, best_fits
= 0;
7036 int cpu
, best_cpu
= -1;
7037 struct cpumask
*cpus
;
7039 cpus
= this_cpu_cpumask_var_ptr(select_rq_mask
);
7040 cpumask_and(cpus
, sched_domain_span(sd
), p
->cpus_ptr
);
7042 task_util
= task_util_est(p
);
7043 util_min
= uclamp_eff_value(p
, UCLAMP_MIN
);
7044 util_max
= uclamp_eff_value(p
, UCLAMP_MAX
);
7046 for_each_cpu_wrap(cpu
, cpus
, target
+ 1) {
7047 unsigned long cpu_cap
= capacity_of(cpu
);
7049 if (!available_idle_cpu(cpu
) && !sched_idle_cpu(cpu
))
7052 fits
= util_fits_cpu(task_util
, util_min
, util_max
, cpu
);
7054 /* This CPU fits with all requirements */
7058 * Only the min performance hint (i.e. uclamp_min) doesn't fit.
7059 * Look for the CPU with best capacity.
7062 cpu_cap
= capacity_orig_of(cpu
) - thermal_load_avg(cpu_rq(cpu
));
7065 * First, select CPU which fits better (-1 being better than 0).
7066 * Then, select the one with best capacity at same level.
7068 if ((fits
< best_fits
) ||
7069 ((fits
== best_fits
) && (cpu_cap
> best_cap
))) {
7079 static inline bool asym_fits_cpu(unsigned long util
,
7080 unsigned long util_min
,
7081 unsigned long util_max
,
7084 if (sched_asym_cpucap_active())
7086 * Return true only if the cpu fully fits the task requirements
7087 * which include the utilization and the performance hints.
7089 return (util_fits_cpu(util
, util_min
, util_max
, cpu
) > 0);
7095 * Try and locate an idle core/thread in the LLC cache domain.
7097 static int select_idle_sibling(struct task_struct
*p
, int prev
, int target
)
7099 bool has_idle_core
= false;
7100 struct sched_domain
*sd
;
7101 unsigned long task_util
, util_min
, util_max
;
7102 int i
, recent_used_cpu
;
7105 * On asymmetric system, update task utilization because we will check
7106 * that the task fits with cpu's capacity.
7108 if (sched_asym_cpucap_active()) {
7109 sync_entity_load_avg(&p
->se
);
7110 task_util
= task_util_est(p
);
7111 util_min
= uclamp_eff_value(p
, UCLAMP_MIN
);
7112 util_max
= uclamp_eff_value(p
, UCLAMP_MAX
);
7116 * per-cpu select_rq_mask usage
7118 lockdep_assert_irqs_disabled();
7120 if ((available_idle_cpu(target
) || sched_idle_cpu(target
)) &&
7121 asym_fits_cpu(task_util
, util_min
, util_max
, target
))
7125 * If the previous CPU is cache affine and idle, don't be stupid:
7127 if (prev
!= target
&& cpus_share_cache(prev
, target
) &&
7128 (available_idle_cpu(prev
) || sched_idle_cpu(prev
)) &&
7129 asym_fits_cpu(task_util
, util_min
, util_max
, prev
))
7133 * Allow a per-cpu kthread to stack with the wakee if the
7134 * kworker thread and the tasks previous CPUs are the same.
7135 * The assumption is that the wakee queued work for the
7136 * per-cpu kthread that is now complete and the wakeup is
7137 * essentially a sync wakeup. An obvious example of this
7138 * pattern is IO completions.
7140 if (is_per_cpu_kthread(current
) &&
7142 prev
== smp_processor_id() &&
7143 this_rq()->nr_running
<= 1 &&
7144 asym_fits_cpu(task_util
, util_min
, util_max
, prev
)) {
7148 /* Check a recently used CPU as a potential idle candidate: */
7149 recent_used_cpu
= p
->recent_used_cpu
;
7150 p
->recent_used_cpu
= prev
;
7151 if (recent_used_cpu
!= prev
&&
7152 recent_used_cpu
!= target
&&
7153 cpus_share_cache(recent_used_cpu
, target
) &&
7154 (available_idle_cpu(recent_used_cpu
) || sched_idle_cpu(recent_used_cpu
)) &&
7155 cpumask_test_cpu(p
->recent_used_cpu
, p
->cpus_ptr
) &&
7156 asym_fits_cpu(task_util
, util_min
, util_max
, recent_used_cpu
)) {
7157 return recent_used_cpu
;
7161 * For asymmetric CPU capacity systems, our domain of interest is
7162 * sd_asym_cpucapacity rather than sd_llc.
7164 if (sched_asym_cpucap_active()) {
7165 sd
= rcu_dereference(per_cpu(sd_asym_cpucapacity
, target
));
7167 * On an asymmetric CPU capacity system where an exclusive
7168 * cpuset defines a symmetric island (i.e. one unique
7169 * capacity_orig value through the cpuset), the key will be set
7170 * but the CPUs within that cpuset will not have a domain with
7171 * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
7175 i
= select_idle_capacity(p
, sd
, target
);
7176 return ((unsigned)i
< nr_cpumask_bits
) ? i
: target
;
7180 sd
= rcu_dereference(per_cpu(sd_llc
, target
));
7184 if (sched_smt_active()) {
7185 has_idle_core
= test_idle_cores(target
);
7187 if (!has_idle_core
&& cpus_share_cache(prev
, target
)) {
7188 i
= select_idle_smt(p
, prev
);
7189 if ((unsigned int)i
< nr_cpumask_bits
)
7194 i
= select_idle_cpu(p
, sd
, has_idle_core
, target
);
7195 if ((unsigned)i
< nr_cpumask_bits
)
/*
 * Predicts what cpu_util(@cpu) would return if @p was removed from @cpu
 * (@dst_cpu = -1) or migrated to @dst_cpu.
 */
static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
{
	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
	unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);

	/*
	 * If @dst_cpu is -1 or @p migrates from @cpu to @dst_cpu remove its
	 * contribution. If @p migrates from another CPU to @cpu add its
	 * contribution. In all the other cases @cpu is not impacted by the
	 * migration so its util_avg is already correct.
	 */
	if (task_cpu(p) == cpu && dst_cpu != cpu)
		lsub_positive(&util, task_util(p));
	else if (task_cpu(p) != cpu && dst_cpu == cpu)
		util += task_util(p);

	if (sched_feat(UTIL_EST)) {
		unsigned long util_est;

		util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);

		/*
		 * During wake-up @p isn't enqueued yet and doesn't contribute
		 * to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
		 * If @dst_cpu == @cpu add it to "simulate" cpu_util after @p
		 * has been enqueued.
		 *
		 * During exec (@dst_cpu = -1) @p is enqueued and does
		 * contribute to cpu_rq(cpu)->cfs.util_est.enqueued.
		 * Remove it to "simulate" cpu_util without @p's contribution.
		 *
		 * Despite the task_on_rq_queued(@p) check there is still a
		 * small window for a possible race when an exec
		 * select_task_rq_fair() races with LB's detach_task():
		 *
		 *   detach_task()
		 *     p->on_rq = TASK_ON_RQ_MIGRATING;
		 *     -------------------------------- A
		 *     deactivate_task()                 \
		 *       dequeue_task_fair()              + Race Time
		 *         util_est_dequeue()            /
		 *     -------------------------------- B
		 *
		 * The additional check "current == p" is required to further
		 * reduce the race window.
		 */
		if (dst_cpu == cpu)
			util_est += _task_util_est(p);
		else if (unlikely(task_on_rq_queued(p) || current == p))
			lsub_positive(&util_est, _task_util_est(p));

		util = max(util, util_est);
	}

	return min(util, capacity_orig_of(cpu));
}

/*
 * cpu_util_without: compute cpu utilization without any contributions from *p
 * @cpu: the CPU which utilization is requested
 * @p: the task which utilization should be discounted
 *
 * The utilization of a CPU is defined by the utilization of tasks currently
 * enqueued on that CPU as well as tasks which are currently sleeping after an
 * execution on that CPU.
 *
 * This method returns the utilization of the specified CPU by discounting the
 * utilization of the specified task, whenever the task is currently
 * contributing to the CPU utilization.
 */
static unsigned long cpu_util_without(int cpu, struct task_struct *p)
{
	/* Task has no contribution or is new */
	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
		return cpu_util_cfs(cpu);

	return cpu_util_next(cpu, p, -1);
}
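/*
 * Illustrative example (editor's sketch, numbers assumed): a waking task
 * with task_util() = 150 last ran on CPU1 (util_avg 500) and is being
 * evaluated for CPU3 (util_avg 300). cpu_util_next(1, p, 3) discounts the
 * task and reports ~350, while cpu_util_next(3, p, 3) adds it and reports
 * ~450, both clamped to the CPU's original capacity. With dst_cpu = -1 the
 * task is simply removed from its own CPU, which is what
 * cpu_util_without() relies on.
 */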
7286 * energy_env - Utilization landscape for energy estimation.
7287 * @task_busy_time: Utilization contribution by the task for which we test the
7288 * placement. Given by eenv_task_busy_time().
7289 * @pd_busy_time: Utilization of the whole perf domain without the task
7290 * contribution. Given by eenv_pd_busy_time().
7291 * @cpu_cap: Maximum CPU capacity for the perf domain.
7292 * @pd_cap: Entire perf domain capacity. (pd->nr_cpus * cpu_cap).
7295 unsigned long task_busy_time
;
7296 unsigned long pd_busy_time
;
7297 unsigned long cpu_cap
;
7298 unsigned long pd_cap
;
7302 * Compute the task busy time for compute_energy(). This time cannot be
7303 * injected directly into effective_cpu_util() because of the IRQ scaling.
7304 * The latter only makes sense with the most recent CPUs where the task has
7307 static inline void eenv_task_busy_time(struct energy_env
*eenv
,
7308 struct task_struct
*p
, int prev_cpu
)
7310 unsigned long busy_time
, max_cap
= arch_scale_cpu_capacity(prev_cpu
);
7311 unsigned long irq
= cpu_util_irq(cpu_rq(prev_cpu
));
7313 if (unlikely(irq
>= max_cap
))
7314 busy_time
= max_cap
;
7316 busy_time
= scale_irq_capacity(task_util_est(p
), irq
, max_cap
);
7318 eenv
->task_busy_time
= busy_time
;
7322 * Compute the perf_domain (PD) busy time for compute_energy(). Based on the
7323 * utilization for each @pd_cpus, it however doesn't take into account
7324 * clamping since the ratio (utilization / cpu_capacity) is already enough to
7325 * scale the EM reported power consumption at the (eventually clamped)
7328 * The contribution of the task @p for which we want to estimate the
7329 * energy cost is removed (by cpu_util_next()) and must be calculated
7330 * separately (see eenv_task_busy_time). This ensures:
7332 * - A stable PD utilization, no matter which CPU of that PD we want to place
7335 * - A fair comparison between CPUs as the task contribution (task_util())
7336 * will always be the same no matter which CPU utilization we rely on
7337 * (util_avg or util_est).
7339 * Set @eenv busy time for the PD that spans @pd_cpus. This busy time can't
7340 * exceed @eenv->pd_cap.
7342 static inline void eenv_pd_busy_time(struct energy_env
*eenv
,
7343 struct cpumask
*pd_cpus
,
7344 struct task_struct
*p
)
7346 unsigned long busy_time
= 0;
7349 for_each_cpu(cpu
, pd_cpus
) {
7350 unsigned long util
= cpu_util_next(cpu
, p
, -1);
7352 busy_time
+= effective_cpu_util(cpu
, util
, ENERGY_UTIL
, NULL
);
7355 eenv
->pd_busy_time
= min(eenv
->pd_cap
, busy_time
);
7359 * Compute the maximum utilization for compute_energy() when the task @p
7360 * is placed on the cpu @dst_cpu.
7362 * Returns the maximum utilization among @eenv->cpus. This utilization can't
7363 * exceed @eenv->cpu_cap.
7365 static inline unsigned long
7366 eenv_pd_max_util(struct energy_env
*eenv
, struct cpumask
*pd_cpus
,
7367 struct task_struct
*p
, int dst_cpu
)
7369 unsigned long max_util
= 0;
7372 for_each_cpu(cpu
, pd_cpus
) {
7373 struct task_struct
*tsk
= (cpu
== dst_cpu
) ? p
: NULL
;
7374 unsigned long util
= cpu_util_next(cpu
, p
, dst_cpu
);
7375 unsigned long cpu_util
;
7378 * Performance domain frequency: utilization clamping
7379 * must be considered since it affects the selection
7380 * of the performance domain frequency.
7381 * NOTE: in case RT tasks are running, by default the
7382 * FREQUENCY_UTIL's utilization can be max OPP.
7384 cpu_util
= effective_cpu_util(cpu
, util
, FREQUENCY_UTIL
, tsk
);
7385 max_util
= max(max_util
, cpu_util
);
7388 return min(max_util
, eenv
->cpu_cap
);
7392 * compute_energy(): Use the Energy Model to estimate the energy that @pd would
7393 * consume for a given utilization landscape @eenv. When @dst_cpu < 0, the task
7394 * contribution is ignored.
7396 static inline unsigned long
7397 compute_energy(struct energy_env
*eenv
, struct perf_domain
*pd
,
7398 struct cpumask
*pd_cpus
, struct task_struct
*p
, int dst_cpu
)
7400 unsigned long max_util
= eenv_pd_max_util(eenv
, pd_cpus
, p
, dst_cpu
);
7401 unsigned long busy_time
= eenv
->pd_busy_time
;
7404 busy_time
= min(eenv
->pd_cap
, busy_time
+ eenv
->task_busy_time
);
7406 return em_cpu_energy(pd
->em_pd
, max_util
, busy_time
, eenv
->cpu_cap
);
7410 * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
7411 * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
7412 * spare capacity in each performance domain and uses it as a potential
7413 * candidate to execute the task. Then, it uses the Energy Model to figure
7414 * out which of the CPU candidates is the most energy-efficient.
7416 * The rationale for this heuristic is as follows. In a performance domain,
7417 * all the most energy efficient CPU candidates (according to the Energy
7418 * Model) are those for which we'll request a low frequency. When there are
7419 * several CPUs for which the frequency request will be the same, we don't
7420 * have enough data to break the tie between them, because the Energy Model
7421 * only includes active power costs. With this model, if we assume that
7422 * frequency requests follow utilization (e.g. using schedutil), the CPU with
7423 * the maximum spare capacity in a performance domain is guaranteed to be among
7424 * the best candidates of the performance domain.
7426 * In practice, it could be preferable from an energy standpoint to pack
7427 * small tasks on a CPU in order to let other CPUs go in deeper idle states,
7428 * but that could also hurt our chances to go cluster idle, and we have no
7429 * ways to tell with the current Energy Model if this is actually a good
7430 * idea or not. So, find_energy_efficient_cpu() basically favors
7431 * cluster-packing, and spreading inside a cluster. That should at least be
7432 * a good thing for latency, and this is consistent with the idea that most
7433 * of the energy savings of EAS come from the asymmetry of the system, and
7434 * not so much from breaking the tie between identical CPUs. That's also the
7435 * reason why EAS is enabled in the topology code only for systems where
7436 * SD_ASYM_CPUCAPACITY is set.
7438 * NOTE: Forkees are not accepted in the energy-aware wake-up path because
7439 * they don't have any useful utilization data yet and it's not possible to
7440 * forecast their impact on energy consumption. Consequently, they will be
7441 * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
7442 * to be energy-inefficient in some use-cases. The alternative would be to
7443 * bias new tasks towards specific types of CPUs first, or to try to infer
7444 * their util_avg from the parent task, but those heuristics could hurt
7445 * other use-cases too. So, until someone finds a better way to solve this,
7446 * let's keep things simple by re-using the existing slow path.
7448 static int find_energy_efficient_cpu(struct task_struct
*p
, int prev_cpu
)
7450 struct cpumask
*cpus
= this_cpu_cpumask_var_ptr(select_rq_mask
);
7451 unsigned long prev_delta
= ULONG_MAX
, best_delta
= ULONG_MAX
;
7452 unsigned long p_util_min
= uclamp_is_used() ? uclamp_eff_value(p
, UCLAMP_MIN
) : 0;
7453 unsigned long p_util_max
= uclamp_is_used() ? uclamp_eff_value(p
, UCLAMP_MAX
) : 1024;
7454 struct root_domain
*rd
= this_rq()->rd
;
7455 int cpu
, best_energy_cpu
, target
= -1;
7456 int prev_fits
= -1, best_fits
= -1;
7457 unsigned long best_thermal_cap
= 0;
7458 unsigned long prev_thermal_cap
= 0;
7459 struct sched_domain
*sd
;
7460 struct perf_domain
*pd
;
7461 struct energy_env eenv
;
7464 pd
= rcu_dereference(rd
->pd
);
7465 if (!pd
|| READ_ONCE(rd
->overutilized
))
7469 * Energy-aware wake-up happens on the lowest sched_domain starting
7470 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
7472 sd
= rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity
));
7473 while (sd
&& !cpumask_test_cpu(prev_cpu
, sched_domain_span(sd
)))
7480 sync_entity_load_avg(&p
->se
);
7481 if (!uclamp_task_util(p
, p_util_min
, p_util_max
))
7484 eenv_task_busy_time(&eenv
, p
, prev_cpu
);
7486 for (; pd
; pd
= pd
->next
) {
7487 unsigned long util_min
= p_util_min
, util_max
= p_util_max
;
7488 unsigned long cpu_cap
, cpu_thermal_cap
, util
;
7489 unsigned long cur_delta
, max_spare_cap
= 0;
7490 unsigned long rq_util_min
, rq_util_max
;
7491 unsigned long prev_spare_cap
= 0;
7492 int max_spare_cap_cpu
= -1;
7493 unsigned long base_energy
;
7494 int fits
, max_fits
= -1;
7496 cpumask_and(cpus
, perf_domain_span(pd
), cpu_online_mask
);
7498 if (cpumask_empty(cpus
))
7501 /* Account thermal pressure for the energy estimation */
7502 cpu
= cpumask_first(cpus
);
7503 cpu_thermal_cap
= arch_scale_cpu_capacity(cpu
);
7504 cpu_thermal_cap
-= arch_scale_thermal_pressure(cpu
);
7506 eenv
.cpu_cap
= cpu_thermal_cap
;
7509 for_each_cpu(cpu
, cpus
) {
7510 struct rq
*rq
= cpu_rq(cpu
);
7512 eenv
.pd_cap
+= cpu_thermal_cap
;
7514 if (!cpumask_test_cpu(cpu
, sched_domain_span(sd
)))
7517 if (!cpumask_test_cpu(cpu
, p
->cpus_ptr
))
7520 util
= cpu_util_next(cpu
, p
, cpu
);
7521 cpu_cap
= capacity_of(cpu
);
7524 * Skip CPUs that cannot satisfy the capacity request.
7525 * IOW, placing the task there would make the CPU
7526 * overutilized. Take uclamp into account to see how
7527 * much capacity we can get out of the CPU; this is
7528 * aligned with sched_cpu_util().
7530 if (uclamp_is_used() && !uclamp_rq_is_idle(rq
)) {
7532 * Open code uclamp_rq_util_with() except for
7533 * the clamp() part. Ie: apply max aggregation
7534 * only. util_fits_cpu() logic requires to
7535 * operate on non clamped util but must use the
7536 * max-aggregated uclamp_{min, max}.
7538 rq_util_min
= uclamp_rq_get(rq
, UCLAMP_MIN
);
7539 rq_util_max
= uclamp_rq_get(rq
, UCLAMP_MAX
);
7541 util_min
= max(rq_util_min
, p_util_min
);
7542 util_max
= max(rq_util_max
, p_util_max
);
7545 fits
= util_fits_cpu(util
, util_min
, util_max
, cpu
);
7549 lsub_positive(&cpu_cap
, util
);
7551 if (cpu
== prev_cpu
) {
7552 /* Always use prev_cpu as a candidate. */
7553 prev_spare_cap
= cpu_cap
;
7555 } else if ((fits
> max_fits
) ||
7556 ((fits
== max_fits
) && (cpu_cap
> max_spare_cap
))) {
7558 * Find the CPU with the maximum spare capacity
7559 * among the remaining CPUs in the performance
7562 max_spare_cap
= cpu_cap
;
7563 max_spare_cap_cpu
= cpu
;
7568 if (max_spare_cap_cpu
< 0 && prev_spare_cap
== 0)
7571 eenv_pd_busy_time(&eenv
, cpus
, p
);
7572 /* Compute the 'base' energy of the pd, without @p */
7573 base_energy
= compute_energy(&eenv
, pd
, cpus
, p
, -1);
7575 /* Evaluate the energy impact of using prev_cpu. */
7576 if (prev_spare_cap
> 0) {
7577 prev_delta
= compute_energy(&eenv
, pd
, cpus
, p
,
7579 /* CPU utilization has changed */
7580 if (prev_delta
< base_energy
)
7582 prev_delta
-= base_energy
;
7583 prev_thermal_cap
= cpu_thermal_cap
;
7584 best_delta
= min(best_delta
, prev_delta
);
7587 /* Evaluate the energy impact of using max_spare_cap_cpu. */
7588 if (max_spare_cap_cpu
>= 0 && max_spare_cap
> prev_spare_cap
) {
7589 /* Current best energy cpu fits better */
7590 if (max_fits
< best_fits
)
7594 * Both don't fit performance hint (i.e. uclamp_min)
7595 * but best energy cpu has better capacity.
7597 if ((max_fits
< 0) &&
7598 (cpu_thermal_cap
<= best_thermal_cap
))
7601 cur_delta
= compute_energy(&eenv
, pd
, cpus
, p
,
7603 /* CPU utilization has changed */
7604 if (cur_delta
< base_energy
)
7606 cur_delta
-= base_energy
;
7609 * Both fit for the task but best energy cpu has lower
7612 if ((max_fits
> 0) && (best_fits
> 0) &&
7613 (cur_delta
>= best_delta
))
7616 best_delta
= cur_delta
;
7617 best_energy_cpu
= max_spare_cap_cpu
;
7618 best_fits
= max_fits
;
7619 best_thermal_cap
= cpu_thermal_cap
;
7624 if ((best_fits
> prev_fits
) ||
7625 ((best_fits
> 0) && (best_delta
< prev_delta
)) ||
7626 ((best_fits
< 0) && (best_thermal_cap
> prev_thermal_cap
)))
7627 target
= best_energy_cpu
;
7638 * select_task_rq_fair: Select target runqueue for the waking task in domains
7639 * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
7640 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
7642 * Balances load by selecting the idlest CPU in the idlest group, or under
7643 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
7645 * Returns the target CPU number.
7648 select_task_rq_fair(struct task_struct
*p
, int prev_cpu
, int wake_flags
)
7650 int sync
= (wake_flags
& WF_SYNC
) && !(current
->flags
& PF_EXITING
);
7651 struct sched_domain
*tmp
, *sd
= NULL
;
7652 int cpu
= smp_processor_id();
7653 int new_cpu
= prev_cpu
;
7654 int want_affine
= 0;
7655 /* SD_flags and WF_flags share the first nibble */
7656 int sd_flag
= wake_flags
& 0xF;
7659 * required for stable ->cpus_allowed
7661 lockdep_assert_held(&p
->pi_lock
);
7662 if (wake_flags
& WF_TTWU
) {
7665 if (sched_energy_enabled()) {
7666 new_cpu
= find_energy_efficient_cpu(p
, prev_cpu
);
7672 want_affine
= !wake_wide(p
) && cpumask_test_cpu(cpu
, p
->cpus_ptr
);
7676 for_each_domain(cpu
, tmp
) {
7678 * If both 'cpu' and 'prev_cpu' are part of this domain,
7679 * cpu is a valid SD_WAKE_AFFINE target.
7681 if (want_affine
&& (tmp
->flags
& SD_WAKE_AFFINE
) &&
7682 cpumask_test_cpu(prev_cpu
, sched_domain_span(tmp
))) {
7683 if (cpu
!= prev_cpu
)
7684 new_cpu
= wake_affine(tmp
, p
, cpu
, prev_cpu
, sync
);
7686 sd
= NULL
; /* Prefer wake_affine over balance flags */
7691 * Usually only true for WF_EXEC and WF_FORK, as sched_domains
7692 * usually do not have SD_BALANCE_WAKE set. That means wakeup
7693 * will usually go to the fast path.
7695 if (tmp
->flags
& sd_flag
)
7697 else if (!want_affine
)
7703 new_cpu
= find_idlest_cpu(sd
, p
, cpu
, prev_cpu
, sd_flag
);
7704 } else if (wake_flags
& WF_TTWU
) { /* XXX always ? */
7706 new_cpu
= select_idle_sibling(p
, prev_cpu
, new_cpu
);
7714 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
7715 * cfs_rq_of(p) references at time of call are still valid and identify the
7716 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
7718 static void migrate_task_rq_fair(struct task_struct
*p
, int new_cpu
)
7720 struct sched_entity
*se
= &p
->se
;
7723 * As blocked tasks retain absolute vruntime the migration needs to
7724 * deal with this by subtracting the old and adding the new
7725 * min_vruntime -- the latter is done by enqueue_entity() when placing
7726 * the task on the new runqueue.
7728 if (READ_ONCE(p
->__state
) == TASK_WAKING
) {
7729 struct cfs_rq
*cfs_rq
= cfs_rq_of(se
);
7731 se
->vruntime
-= u64_u32_load(cfs_rq
->min_vruntime
);
7734 if (!task_on_rq_migrating(p
)) {
7735 remove_entity_load_avg(se
);
7738 * Here, the task's PELT values have been updated according to
7739 * the current rq's clock. But if that clock hasn't been
7740 * updated in a while, a substantial idle time will be missed,
7741 * leading to an inflation after wake-up on the new rq.
7743 * Estimate the missing time from the cfs_rq last_update_time
7744 * and update sched_avg to improve the PELT continuity after
7747 migrate_se_pelt_lag(se
);
7750 /* Tell new CPU we are migrated */
7751 se
->avg
.last_update_time
= 0;
7753 update_scan_period(p
, new_cpu
);
7756 static void task_dead_fair(struct task_struct
*p
)
7758 remove_entity_load_avg(&p
->se
);
7762 balance_fair(struct rq
*rq
, struct task_struct
*prev
, struct rq_flags
*rf
)
7767 return newidle_balance(rq
, rf
) != 0;
7769 #endif /* CONFIG_SMP */
7771 static unsigned long wakeup_gran(struct sched_entity
*se
)
7773 unsigned long gran
= sysctl_sched_wakeup_granularity
;
7776 * Since its curr running now, convert the gran from real-time
7777 * to virtual-time in his units.
7779 * By using 'se' instead of 'curr' we penalize light tasks, so
7780 * they get preempted easier. That is, if 'se' < 'curr' then
7781 * the resulting gran will be larger, therefore penalizing the
7782 * lighter, if otoh 'se' > 'curr' then the resulting gran will
7783 * be smaller, again penalizing the lighter task.
7785 * This is especially important for buddies when the leftmost
7786 * task is higher priority than the buddy.
7788 return calc_delta_fair(gran
, se
);
7792 * Should 'se' preempt 'curr'.
7806 wakeup_preempt_entity(struct sched_entity
*curr
, struct sched_entity
*se
)
7808 s64 gran
, vdiff
= curr
->vruntime
- se
->vruntime
;
7813 gran
= wakeup_gran(se
);
7820 static void set_last_buddy(struct sched_entity
*se
)
7822 for_each_sched_entity(se
) {
7823 if (SCHED_WARN_ON(!se
->on_rq
))
7827 cfs_rq_of(se
)->last
= se
;
7831 static void set_next_buddy(struct sched_entity
*se
)
7833 for_each_sched_entity(se
) {
7834 if (SCHED_WARN_ON(!se
->on_rq
))
7838 cfs_rq_of(se
)->next
= se
;
7842 static void set_skip_buddy(struct sched_entity
*se
)
7844 for_each_sched_entity(se
)
7845 cfs_rq_of(se
)->skip
= se
;
7849 * Preempt the current task with a newly woken task if needed:
7851 static void check_preempt_wakeup(struct rq
*rq
, struct task_struct
*p
, int wake_flags
)
7853 struct task_struct
*curr
= rq
->curr
;
7854 struct sched_entity
*se
= &curr
->se
, *pse
= &p
->se
;
7855 struct cfs_rq
*cfs_rq
= task_cfs_rq(curr
);
7856 int scale
= cfs_rq
->nr_running
>= sched_nr_latency
;
7857 int next_buddy_marked
= 0;
7858 int cse_is_idle
, pse_is_idle
;
7860 if (unlikely(se
== pse
))
7864 * This is possible from callers such as attach_tasks(), in which we
7865 * unconditionally check_preempt_curr() after an enqueue (which may have
7866 * lead to a throttle). This both saves work and prevents false
7867 * next-buddy nomination below.
7869 if (unlikely(throttled_hierarchy(cfs_rq_of(pse
))))
7872 if (sched_feat(NEXT_BUDDY
) && scale
&& !(wake_flags
& WF_FORK
)) {
7873 set_next_buddy(pse
);
7874 next_buddy_marked
= 1;
7878 * We can come here with TIF_NEED_RESCHED already set from new task
7881 * Note: this also catches the edge-case of curr being in a throttled
7882 * group (e.g. via set_curr_task), since update_curr() (in the
7883 * enqueue of curr) will have resulted in resched being set. This
7884 * prevents us from potentially nominating it as a false LAST_BUDDY
7887 if (test_tsk_need_resched(curr
))
7890 /* Idle tasks are by definition preempted by non-idle tasks. */
7891 if (unlikely(task_has_idle_policy(curr
)) &&
7892 likely(!task_has_idle_policy(p
)))
7896 * Batch and idle tasks do not preempt non-idle tasks (their preemption
7897 * is driven by the tick):
7899 if (unlikely(p
->policy
!= SCHED_NORMAL
) || !sched_feat(WAKEUP_PREEMPTION
))
7902 find_matching_se(&se
, &pse
);
7905 cse_is_idle
= se_is_idle(se
);
7906 pse_is_idle
= se_is_idle(pse
);
7909 * Preempt an idle group in favor of a non-idle group (and don't preempt
7910 * in the inverse case).
7912 if (cse_is_idle
&& !pse_is_idle
)
7914 if (cse_is_idle
!= pse_is_idle
)
7917 update_curr(cfs_rq_of(se
));
7918 if (wakeup_preempt_entity(se
, pse
) == 1) {
7920 * Bias pick_next to pick the sched entity that is
7921 * triggering this preemption.
7923 if (!next_buddy_marked
)
7924 set_next_buddy(pse
);
7933 * Only set the backward buddy when the current task is still
7934 * on the rq. This can happen when a wakeup gets interleaved
7935 * with schedule on the ->pre_schedule() or idle_balance()
7936 * point, either of which can * drop the rq lock.
7938 * Also, during early boot the idle thread is in the fair class,
7939 * for obvious reasons its a bad idea to schedule back to it.
7941 if (unlikely(!se
->on_rq
|| curr
== rq
->idle
))
7944 if (sched_feat(LAST_BUDDY
) && scale
&& entity_is_task(se
))
7949 static struct task_struct
*pick_task_fair(struct rq
*rq
)
7951 struct sched_entity
*se
;
7952 struct cfs_rq
*cfs_rq
;
7956 if (!cfs_rq
->nr_running
)
7960 struct sched_entity
*curr
= cfs_rq
->curr
;
7962 /* When we pick for a remote RQ, we'll not have done put_prev_entity() */
7965 update_curr(cfs_rq
);
7969 if (unlikely(check_cfs_rq_runtime(cfs_rq
)))
7973 se
= pick_next_entity(cfs_rq
, curr
);
7974 cfs_rq
= group_cfs_rq(se
);
7981 struct task_struct
*
7982 pick_next_task_fair(struct rq
*rq
, struct task_struct
*prev
, struct rq_flags
*rf
)
7984 struct cfs_rq
*cfs_rq
= &rq
->cfs
;
7985 struct sched_entity
*se
;
7986 struct task_struct
*p
;
7990 if (!sched_fair_runnable(rq
))
7993 #ifdef CONFIG_FAIR_GROUP_SCHED
7994 if (!prev
|| prev
->sched_class
!= &fair_sched_class
)
7998 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
7999 * likely that a next task is from the same cgroup as the current.
8001 * Therefore attempt to avoid putting and setting the entire cgroup
8002 * hierarchy, only change the part that actually changes.
8006 struct sched_entity
*curr
= cfs_rq
->curr
;
8009 * Since we got here without doing put_prev_entity() we also
8010 * have to consider cfs_rq->curr. If it is still a runnable
8011 * entity, update_curr() will update its vruntime, otherwise
8012 * forget we've ever seen it.
8016 update_curr(cfs_rq
);
8021 * This call to check_cfs_rq_runtime() will do the
8022 * throttle and dequeue its entity in the parent(s).
8023 * Therefore the nr_running test will indeed
8026 if (unlikely(check_cfs_rq_runtime(cfs_rq
))) {
8029 if (!cfs_rq
->nr_running
)
8036 se
= pick_next_entity(cfs_rq
, curr
);
8037 cfs_rq
= group_cfs_rq(se
);
8043 * Since we haven't yet done put_prev_entity and if the selected task
8044 * is a different task than we started out with, try and touch the
8045 * least amount of cfs_rqs.
8048 struct sched_entity
*pse
= &prev
->se
;
8050 while (!(cfs_rq
= is_same_group(se
, pse
))) {
8051 int se_depth
= se
->depth
;
8052 int pse_depth
= pse
->depth
;
8054 if (se_depth
<= pse_depth
) {
8055 put_prev_entity(cfs_rq_of(pse
), pse
);
8056 pse
= parent_entity(pse
);
8058 if (se_depth
>= pse_depth
) {
8059 set_next_entity(cfs_rq_of(se
), se
);
8060 se
= parent_entity(se
);
8064 put_prev_entity(cfs_rq
, pse
);
8065 set_next_entity(cfs_rq
, se
);
8072 put_prev_task(rq
, prev
);
8075 se
= pick_next_entity(cfs_rq
, NULL
);
8076 set_next_entity(cfs_rq
, se
);
8077 cfs_rq
= group_cfs_rq(se
);
8082 done
: __maybe_unused
;
8085 * Move the next running task to the front of
8086 * the list, so our cfs_tasks list becomes MRU
8089 list_move(&p
->se
.group_node
, &rq
->cfs_tasks
);
8092 if (hrtick_enabled_fair(rq
))
8093 hrtick_start_fair(rq
, p
);
8095 update_misfit_status(p
, rq
);
8103 new_tasks
= newidle_balance(rq
, rf
);
8106 * Because newidle_balance() releases (and re-acquires) rq->lock, it is
8107 * possible for any higher priority task to appear. In that case we
8108 * must re-start the pick_next_entity() loop.
8117 * rq is about to be idle, check if we need to update the
8118 * lost_idle_time of clock_pelt
8120 update_idle_rq_clock_pelt(rq
);
8125 static struct task_struct
*__pick_next_task_fair(struct rq
*rq
)
8127 return pick_next_task_fair(rq
, NULL
, NULL
);
8131 * Account for a descheduled task:
8133 static void put_prev_task_fair(struct rq
*rq
, struct task_struct
*prev
)
8135 struct sched_entity
*se
= &prev
->se
;
8136 struct cfs_rq
*cfs_rq
;
8138 for_each_sched_entity(se
) {
8139 cfs_rq
= cfs_rq_of(se
);
8140 put_prev_entity(cfs_rq
, se
);
8145 * sched_yield() is very simple
8147 * The magic of dealing with the ->skip buddy is in pick_next_entity.
8149 static void yield_task_fair(struct rq
*rq
)
8151 struct task_struct
*curr
= rq
->curr
;
8152 struct cfs_rq
*cfs_rq
= task_cfs_rq(curr
);
8153 struct sched_entity
*se
= &curr
->se
;
8156 * Are we the only task in the tree?
8158 if (unlikely(rq
->nr_running
== 1))
8161 clear_buddies(cfs_rq
, se
);
8163 if (curr
->policy
!= SCHED_BATCH
) {
8164 update_rq_clock(rq
);
8166 * Update run-time statistics of the 'current'.
8168 update_curr(cfs_rq
);
8170 * Tell update_rq_clock() that we've just updated,
8171 * so we don't do microscopic update in schedule()
8172 * and double the fastpath cost.
8174 rq_clock_skip_update(rq
);
8180 static bool yield_to_task_fair(struct rq
*rq
, struct task_struct
*p
)
8182 struct sched_entity
*se
= &p
->se
;
8184 /* throttled hierarchies are not runnable */
8185 if (!se
->on_rq
|| throttled_hierarchy(cfs_rq_of(se
)))
8188 /* Tell the scheduler that we'd really like pse to run next. */
8191 yield_task_fair(rq
);
/**************************************************
 * Fair scheduling class load-balancing methods.
 *
 * BASICS
 *
 * The purpose of load-balancing is to achieve the same basic fairness the
 * per-CPU scheduler provides, namely provide a proportional amount of compute
 * time to each task. This is expressed in the following equation:
 *
 *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
 *
 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
 * W_i,0 is defined as:
 *
 *   W_i,0 = \Sum_j w_i,j                                             (2)
 *
 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
 * is derived from the nice value as per sched_prio_to_weight[].
 *
 * The weight average is an exponential decay average of the instantaneous
 * weight:
 *
 *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
 *
 * C_i is the compute capacity of CPU i, typically it is the
 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
 * can also include other factors [XXX].
 *
 * To achieve this balance we define a measure of imbalance which follows
 * directly from (1):
 *
 *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
 *
 * We then move tasks around to minimize the imbalance. In the continuous
 * function space it is obvious this converges, in the discrete case we get
 * a few fun cases generally called infeasible weight scenarios.
 *
 * [XXX expand on:
 *     - infeasible weights;
 *     - local vs global optima in the discrete case. ]
 *
 *
 * SCHED DOMAINS
 *
 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
 * for all i,j solution, we create a tree of CPUs that follows the hardware
 * topology where each level pairs two lower groups (or better). This results
 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
 * tree to only the first of the previous level and we decrease the frequency
 * of load-balance at each level inversely proportional to the number of CPUs
 * in the groups.
 *
 * This yields:
 *
 *     log_2 n     1     n
 *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
 *     i = 0      2^i   2^i
 *                 |     |      `- size of each group
 *                 |     `- number of CPUs doing load-balance
 *                 `- frequency of load-balance at this level
 *     `- sum over all levels
 *
 * Coupled with a limit on how many tasks we can migrate every balance pass,
 * this makes (5) the runtime complexity of the balancer.
 *
 * An important property here is that each CPU is still (indirectly) connected
 * to every other CPU in at most O(log n) steps:
 *
 * The adjacency matrix of the resulting graph is given by:
 *
 *             log_2 n
 *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
 *             k = 0
 *
 * And you'll find that:
 *
 *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
 *
 * Showing there's indeed a path between every CPU in at most O(log n) steps.
 * The task movement gives a factor of O(m), giving a convergence complexity
 * of:
 *
 *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
 *
 *
 * WORK CONSERVING
 *
 * In order to avoid CPUs going idle while there's still work to do, new idle
 * balancing is more aggressive and has the newly idle CPU iterate up the domain
 * tree itself instead of relying on other CPUs to bring it work.
 *
 * This adds some complexity to both (5) and (8) but it reduces the total idle
 * time.
 *
 *
 * CGROUPS
 *
 * Cgroups make a horror show out of (2), instead of a simple sum we get:
 *
 *                                s_k,i
 *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
 *                                 S_k
 *
 * Where
 *
 *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                  (10)
 *
 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
 *
 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
 * property.
 *
 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
 *   rewrite all of this once again.]
 */

static unsigned long __read_mostly max_load_balance_interval = HZ/10;
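/*
 * Illustrative instance of (5) (editor's sketch): for n = 8 CPUs the
 * per-level terms are n/2^i, i.e. 8 + 4 + 2 + 1 = 15 balance operations
 * across the four levels, so halving both the balancing frequency and the
 * number of balancing CPUs at each level keeps the total work linear in
 * the number of CPUs instead of quadratic.
 */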
8317 enum fbq_type
{ regular
, remote
, all
};
8320 * 'group_type' describes the group of CPUs at the moment of load balancing.
8322 * The enum is ordered by pulling priority, with the group with lowest priority
8323 * first so the group_type can simply be compared when selecting the busiest
8324 * group. See update_sd_pick_busiest().
8327 /* The group has spare capacity that can be used to run more tasks. */
8328 group_has_spare
= 0,
8330 * The group is fully used and the tasks don't compete for more CPU
8331 * cycles. Nevertheless, some tasks might wait before running.
8335 * One task doesn't fit with CPU's capacity and must be migrated to a
8336 * more powerful CPU.
8340 * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
8341 * and the task should be migrated to it instead of running on the
8346 * The tasks' affinity constraints previously prevented the scheduler
8347 * from balancing the load across the system.
8351 * The CPU is overloaded and can't provide expected CPU cycles to all
8357 enum migration_type
{
8364 #define LBF_ALL_PINNED 0x01
8365 #define LBF_NEED_BREAK 0x02
8366 #define LBF_DST_PINNED 0x04
8367 #define LBF_SOME_PINNED 0x08
8368 #define LBF_ACTIVE_LB 0x10
8371 struct sched_domain
*sd
;
8379 struct cpumask
*dst_grpmask
;
8381 enum cpu_idle_type idle
;
8383 /* The set of CPUs under consideration for load-balancing */
8384 struct cpumask
*cpus
;
8389 unsigned int loop_break
;
8390 unsigned int loop_max
;
8392 enum fbq_type fbq_type
;
8393 enum migration_type migration_type
;
8394 struct list_head tasks
;
/*
 * Is this task likely cache-hot:
 */
static int task_hot(struct task_struct *p, struct lb_env *env)
{
	s64 delta;

	lockdep_assert_rq_held(env->src_rq);

	if (p->sched_class != &fair_sched_class)
		return 0;

	if (unlikely(task_has_idle_policy(p)))
		return 0;

	/* SMT siblings share cache */
	if (env->sd->flags & SD_SHARE_CPUCAPACITY)
		return 0;

	/*
	 * Buddy candidates are cache hot:
	 */
	if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
	    (&p->se == cfs_rq_of(&p->se)->next ||
	     &p->se == cfs_rq_of(&p->se)->last))
		return 1;

	if (sysctl_sched_migration_cost == -1)
		return 1;

	/*
	 * Don't migrate task if the task's cookie does not match
	 * with the destination CPU's core cookie.
	 */
	if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
		return 1;

	if (sysctl_sched_migration_cost == 0)
		return 0;

	delta = rq_clock_task(env->src_rq) - p->se.exec_start;

	return delta < (s64)sysctl_sched_migration_cost;
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns 1, if task migration degrades locality.
 * Returns 0, if task migration improves locality i.e. migration is preferred.
 * Returns -1, if task migration is not affected by locality.
 */
static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
{
	struct numa_group *numa_group = rcu_dereference(p->numa_group);
	unsigned long src_weight, dst_weight;
	int src_nid, dst_nid, dist;

	if (!static_branch_likely(&sched_numa_balancing))
		return -1;

	if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
		return -1;

	src_nid = cpu_to_node(env->src_cpu);
	dst_nid = cpu_to_node(env->dst_cpu);

	if (src_nid == dst_nid)
		return -1;

	/* Migrating away from the preferred node is always bad. */
	if (src_nid == p->numa_preferred_nid) {
		if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
			return 1;
		else
			return -1;
	}

	/* Encourage migration to the preferred node. */
	if (dst_nid == p->numa_preferred_nid)
		return 0;

	/* Leaving a core idle is often worse than degrading locality. */
	if (env->idle == CPU_IDLE)
		return -1;

	dist = node_distance(src_nid, dst_nid);
	if (numa_group) {
		src_weight = group_weight(p, src_nid, dist);
		dst_weight = group_weight(p, dst_nid, dist);
	} else {
		src_weight = task_weight(p, src_nid, dist);
		dst_weight = task_weight(p, dst_nid, dist);
	}

	return dst_weight < src_weight;
}

#else
static inline int migrate_degrades_locality(struct task_struct *p,
					    struct lb_env *env)
{
	return -1;
}
#endif
/*
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */
static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
	int tsk_cache_hot;

	lockdep_assert_rq_held(env->src_rq);

	/*
	 * We do not migrate tasks that are:
	 * 1) throttled_lb_pair, or
	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
	 * 3) running (obviously), or
	 * 4) are cache-hot on their current CPU.
	 */
	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
		return 0;

	/* Disregard pcpu kthreads; they are where they need to be. */
	if (kthread_is_per_cpu(p))
		return 0;

	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
		int cpu;

		schedstat_inc(p->stats.nr_failed_migrations_affine);

		env->flags |= LBF_SOME_PINNED;

		/*
		 * Remember if this task can be migrated to any other CPU in
		 * our sched_group. We may want to revisit it if we couldn't
		 * meet load balance goals by pulling other tasks on src_cpu.
		 *
		 * Avoid computing new_dst_cpu
		 * - for NEWLY_IDLE
		 * - if we have already computed one in current iteration
		 * - if it's an active balance
		 */
		if (env->idle == CPU_NEWLY_IDLE ||
		    env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
			return 0;

		/* Prevent re-selecting dst_cpu via env's CPUs: */
		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
				env->flags |= LBF_DST_PINNED;
				env->new_dst_cpu = cpu;
				break;
			}
		}

		return 0;
	}

	/* Record that we found at least one task that could run on dst_cpu */
	env->flags &= ~LBF_ALL_PINNED;

	if (task_on_cpu(env->src_rq, p)) {
		schedstat_inc(p->stats.nr_failed_migrations_running);
		return 0;
	}

	/*
	 * Aggressive migration if:
	 * 1) active balance
	 * 2) destination numa is preferred
	 * 3) task is cache cold, or
	 * 4) too many balance attempts have failed.
	 */
	if (env->flags & LBF_ACTIVE_LB)
		return 1;

	tsk_cache_hot = migrate_degrades_locality(p, env);
	if (tsk_cache_hot == -1)
		tsk_cache_hot = task_hot(p, env);

	if (tsk_cache_hot <= 0 ||
	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
		if (tsk_cache_hot == 1) {
			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
			schedstat_inc(p->stats.nr_forced_migrations);
		}
		return 1;
	}

	schedstat_inc(p->stats.nr_failed_migrations_hot);
	return 0;
}
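/*
 * Illustrative note (not part of the original source): the cache-hot test
 * above is relaxed as balancing keeps failing. For example, on a domain
 * whose cache_nice_tries is 1 (a typical value for a cache-sharing level),
 * a cache-hot task is still pulled once nr_balance_failed exceeds 1, and
 * the forced migration is accounted in lb_hot_gained/nr_forced_migrations.
 */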
/*
 * detach_task() -- detach the task for the migration specified in env
 */
static void detach_task(struct task_struct *p, struct lb_env *env)
{
	lockdep_assert_rq_held(env->src_rq);

	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
	set_task_cpu(p, env->dst_cpu);
}
/*
 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
 * part of active balancing operations within "domain".
 *
 * Returns a task if successful and NULL otherwise.
 */
static struct task_struct *detach_one_task(struct lb_env *env)
{
	struct task_struct *p;

	lockdep_assert_rq_held(env->src_rq);

	list_for_each_entry_reverse(p,
			&env->src_rq->cfs_tasks, se.group_node) {
		if (!can_migrate_task(p, env))
			continue;

		detach_task(p, env);

		/*
		 * Right now, this is only the second place where
		 * lb_gained[env->idle] is updated (other is detach_tasks)
		 * so we can safely collect stats here rather than
		 * inside detach_tasks().
		 */
		schedstat_inc(env->sd->lb_gained[env->idle]);
		return p;
	}
	return NULL;
}
/*
 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
 * busiest_rq, as part of a balancing operation within domain "sd".
 *
 * Returns number of detached tasks if successful and 0 otherwise.
 */
static int detach_tasks(struct lb_env *env)
{
	struct list_head *tasks = &env->src_rq->cfs_tasks;
	unsigned long util, load;
	struct task_struct *p;
	int detached = 0;

	lockdep_assert_rq_held(env->src_rq);

	/*
	 * Source run queue has been emptied by another CPU, clear
	 * LBF_ALL_PINNED flag as we will not test any task.
	 */
	if (env->src_rq->nr_running <= 1) {
		env->flags &= ~LBF_ALL_PINNED;
		return 0;
	}

	if (env->imbalance <= 0)
		return 0;

	while (!list_empty(tasks)) {
		/*
		 * We don't want to steal all, otherwise we may be treated likewise,
		 * which could at worst lead to a livelock crash.
		 */
		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
			break;

		env->loop++;
		/*
		 * We've more or less seen every task there is, call it quits
		 * unless we haven't found any movable task yet.
		 */
		if (env->loop > env->loop_max &&
		    !(env->flags & LBF_ALL_PINNED))
			break;

		/* take a breather every nr_migrate tasks */
		if (env->loop > env->loop_break) {
			env->loop_break += SCHED_NR_MIGRATE_BREAK;
			env->flags |= LBF_NEED_BREAK;
			break;
		}

		p = list_last_entry(tasks, struct task_struct, se.group_node);

		if (!can_migrate_task(p, env))
			goto next;

		switch (env->migration_type) {
		case migrate_load:
			/*
			 * Depending on the number of CPUs and tasks and the
			 * cgroup hierarchy, task_h_load() can return a null
			 * value. Make sure that env->imbalance decreases
			 * otherwise detach_tasks() will stop only after
			 * detaching up to loop_max tasks.
			 */
			load = max_t(unsigned long, task_h_load(p), 1);

			if (sched_feat(LB_MIN) &&
			    load < 16 && !env->sd->nr_balance_failed)
				goto next;

			/*
			 * Make sure that we don't migrate too much load.
			 * Nevertheless, let's relax the constraint if the
			 * scheduler fails to find a good waiting task to
			 * migrate.
			 */
			if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
				goto next;

			env->imbalance -= load;
			break;

		case migrate_util:
			util = task_util_est(p);

			if (util > env->imbalance)
				goto next;

			env->imbalance -= util;
			break;

		case migrate_task:
			env->imbalance--;
			break;

		case migrate_misfit:
			/* This is not a misfit task */
			if (task_fits_cpu(p, env->src_cpu))
				goto next;

			env->imbalance = 0;
			break;
		}

		detach_task(p, env);
		list_add(&p->se.group_node, &env->tasks);

		detached++;

#ifdef CONFIG_PREEMPTION
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is detached to minimize
		 * the critical section.
		 */
		if (env->idle == CPU_NEWLY_IDLE)
			break;
#endif

		/*
		 * We only want to steal up to the prescribed amount of
		 * load/util/tasks.
		 */
		if (env->imbalance <= 0)
			break;

		continue;
next:
		list_move(&p->se.group_node, tasks);
	}

	/*
	 * Right now, this is one of only two places we collect this stat
	 * so we can safely collect detach_one_task() stats here rather
	 * than inside detach_one_task().
	 */
	schedstat_add(env->sd->lb_gained[env->idle], detached);

	return detached;
}
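/*
 * Worked example (illustrative, not from the original source): for
 * migrate_load with env->imbalance = 2048 and nr_balance_failed = 0, a
 * first task with task_h_load() = 900 is detached (imbalance -> 1148), a
 * second one with 700 is detached (imbalance -> 448), and a third one with
 * 600 is skipped because shr_bound(600, 0) = 600 > 448; repeated balance
 * failures shift the load right and progressively relax this check.
 */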
/*
 * attach_task() -- attach the task detached by detach_task() to its new rq.
 */
static void attach_task(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);

	WARN_ON_ONCE(task_rq(p) != rq);
	activate_task(rq, p, ENQUEUE_NOCLOCK);
	check_preempt_curr(rq, p, 0);
}

/*
 * attach_one_task() -- attaches the task returned from detach_one_task() to
 * its new rq.
 */
static void attach_one_task(struct rq *rq, struct task_struct *p)
{
	struct rq_flags rf;

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	attach_task(rq, p);
	rq_unlock(rq, &rf);
}

/*
 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
 * new rq.
 */
static void attach_tasks(struct lb_env *env)
{
	struct list_head *tasks = &env->tasks;
	struct task_struct *p;
	struct rq_flags rf;

	rq_lock(env->dst_rq, &rf);
	update_rq_clock(env->dst_rq);

	while (!list_empty(tasks)) {
		p = list_first_entry(tasks, struct task_struct, se.group_node);
		list_del_init(&p->se.group_node);

		attach_task(env->dst_rq, p);
	}

	rq_unlock(env->dst_rq, &rf);
}
#ifdef CONFIG_NO_HZ_COMMON
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->avg.load_avg)
		return true;

	if (cfs_rq->avg.util_avg)
		return true;

	return false;
}

static inline bool others_have_blocked(struct rq *rq)
{
	if (READ_ONCE(rq->avg_rt.util_avg))
		return true;

	if (READ_ONCE(rq->avg_dl.util_avg))
		return true;

	if (thermal_load_avg(rq))
		return true;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if (READ_ONCE(rq->avg_irq.util_avg))
		return true;
#endif

	return false;
}

static inline void update_blocked_load_tick(struct rq *rq)
{
	WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
}

static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
{
	if (!has_blocked)
		rq->has_blocked_load = 0;
}
#else
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
static inline bool others_have_blocked(struct rq *rq) { return false; }
static inline void update_blocked_load_tick(struct rq *rq) {}
static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
#endif
static bool __update_blocked_others(struct rq *rq, bool *done)
{
	const struct sched_class *curr_class;
	u64 now = rq_clock_pelt(rq);
	unsigned long thermal_pressure;
	bool decayed;

	/*
	 * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
	 * DL and IRQ signals have been updated before updating CFS.
	 */
	curr_class = rq->curr->sched_class;

	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));

	decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
		  update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
		  update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
		  update_irq_load_avg(rq, 0);

	if (others_have_blocked(rq))
		*done = false;

	return decayed;
}
#ifdef CONFIG_FAIR_GROUP_SCHED

static bool __update_blocked_fair(struct rq *rq, bool *done)
{
	struct cfs_rq *cfs_rq, *pos;
	bool decayed = false;
	int cpu = cpu_of(rq);

	/*
	 * Iterates the task_group tree in a bottom up fashion, see
	 * list_add_leaf_cfs_rq() for details.
	 */
	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
		struct sched_entity *se;

		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
			update_tg_load_avg(cfs_rq);

			if (cfs_rq->nr_running == 0)
				update_idle_cfs_rq_clock_pelt(cfs_rq);

			if (cfs_rq == &rq->cfs)
				decayed = true;
		}

		/* Propagate pending load changes to the parent, if any: */
		se = cfs_rq->tg->se[cpu];
		if (se && !skip_blocked_update(se))
			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);

		/*
		 * There can be a lot of idle CPU cgroups. Don't let fully
		 * decayed cfs_rqs linger on the list.
		 */
		if (cfs_rq_is_decayed(cfs_rq))
			list_del_leaf_cfs_rq(cfs_rq);

		/* Don't need periodic decay once load/util_avg are null */
		if (cfs_rq_has_blocked(cfs_rq))
			*done = false;
	}

	return decayed;
}
/*
 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parents load.
 */
static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
	unsigned long now = jiffies;
	unsigned long load;

	if (cfs_rq->last_h_load_update == now)
		return;

	WRITE_ONCE(cfs_rq->h_load_next, NULL);
	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		WRITE_ONCE(cfs_rq->h_load_next, se);
		if (cfs_rq->last_h_load_update == now)
			break;
	}

	if (!se) {
		cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
		cfs_rq->last_h_load_update = now;
	}

	while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
		load = cfs_rq->h_load;
		load = div64_ul(load * se->avg.load_avg,
			cfs_rq_load_avg(cfs_rq) + 1);
		cfs_rq = group_cfs_rq(se);
		cfs_rq->h_load = load;
		cfs_rq->last_h_load_update = now;
	}
}

static unsigned long task_h_load(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_cfs_rq_h_load(cfs_rq);
	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
			cfs_rq_load_avg(cfs_rq) + 1);
}
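/*
 * Worked example (illustrative, not from the original source): if a task's
 * se.avg.load_avg is 300 and it runs in a cfs_rq whose hierarchical h_load
 * is 512 with cfs_rq_load_avg() = 1024, then
 * task_h_load() = 300 * 512 / (1024 + 1) ~= 149, i.e. the task contributes
 * roughly 15% of a nice-0 task's load at the root level.
 */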
#else
static bool __update_blocked_fair(struct rq *rq, bool *done)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	bool decayed;

	decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
	if (cfs_rq_has_blocked(cfs_rq))
		*done = false;

	return decayed;
}

static unsigned long task_h_load(struct task_struct *p)
{
	return p->se.avg.load_avg;
}
#endif
static void update_blocked_averages(int cpu)
{
	bool decayed = false, done = true;
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	update_blocked_load_tick(rq);
	update_rq_clock(rq);

	decayed |= __update_blocked_others(rq, &done);
	decayed |= __update_blocked_fair(rq, &done);

	update_blocked_load_status(rq, !done);
	if (decayed)
		cpufreq_update_util(rq, 0);
	rq_unlock_irqrestore(rq, &rf);
}
/********** Helpers for find_busiest_group ************************/

/*
 * sg_lb_stats - stats of a sched_group required for load_balancing
 */
struct sg_lb_stats {
	unsigned long avg_load; /* Avg load across the CPUs of the group */
	unsigned long group_load; /* Total load over the CPUs of the group */
	unsigned long group_capacity;
	unsigned long group_util; /* Total utilization over the CPUs of the group */
	unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
	unsigned int sum_nr_running; /* Nr of tasks running in the group */
	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
	unsigned int idle_cpus;
	unsigned int group_weight;
	enum group_type group_type;
	unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
	unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
};

/*
 * sd_lb_stats - Structure to store the statistics of a sched_domain
 *		 during load balancing.
 */
struct sd_lb_stats {
	struct sched_group *busiest;	/* Busiest group in this sd */
	struct sched_group *local;	/* Local group in this sd */
	unsigned long total_load;	/* Total load of all groups in sd */
	unsigned long total_capacity;	/* Total capacity of all groups in sd */
	unsigned long avg_load;	/* Average load across all groups in sd */
	unsigned int prefer_sibling; /* tasks should go to sibling first */

	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
	struct sg_lb_stats local_stat;	/* Statistics of the local group */
};
static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
{
	/*
	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
	 * local_stat because update_sg_lb_stats() does a full clear/assignment.
	 * We must however set busiest_stat::group_type and
	 * busiest_stat::idle_cpus to the worst busiest group because
	 * update_sd_pick_busiest() reads these before assignment.
	 */
	*sds = (struct sd_lb_stats){
		.busiest = NULL,
		.local = NULL,
		.total_load = 0UL,
		.total_capacity = 0UL,
		.busiest_stat = {
			.idle_cpus = UINT_MAX,
			.group_type = group_has_spare,
		},
	};
}
static unsigned long scale_rt_capacity(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long max = arch_scale_cpu_capacity(cpu);
	unsigned long used, free;
	unsigned long irq;

	irq = cpu_util_irq(rq);

	if (unlikely(irq >= max))
		return 1;

	/*
	 * avg_rt.util_avg and avg_dl.util_avg track binary signals
	 * (running and not running) with weights 0 and 1024 respectively.
	 * avg_thermal.load_avg tracks thermal pressure and the weighted
	 * average uses the actual delta max capacity(load).
	 */
	used = READ_ONCE(rq->avg_rt.util_avg);
	used += READ_ONCE(rq->avg_dl.util_avg);
	used += thermal_load_avg(rq);

	if (unlikely(used >= max))
		return 1;

	free = max - used;

	return scale_irq_capacity(free, irq, max);
}
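/*
 * Worked example (illustrative, not from the original source): with
 * arch_scale_cpu_capacity() = 1024, no IRQ pressure and avg_rt.util_avg =
 * 128 (avg_dl and thermal at 0), used = 128 and free = 896, so the CPU is
 * reported as having roughly 87% of its original capacity left for CFS.
 */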
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
	unsigned long capacity = scale_rt_capacity(cpu);
	struct sched_group *sdg = sd->groups;

	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);

	if (!capacity)
		capacity = 1;

	cpu_rq(cpu)->cpu_capacity = capacity;
	trace_sched_cpu_capacity_tp(cpu_rq(cpu));

	sdg->sgc->capacity = capacity;
	sdg->sgc->min_capacity = capacity;
	sdg->sgc->max_capacity = capacity;
}
void update_group_capacity(struct sched_domain *sd, int cpu)
{
	struct sched_domain *child = sd->child;
	struct sched_group *group, *sdg = sd->groups;
	unsigned long capacity, min_capacity, max_capacity;
	unsigned long interval;

	interval = msecs_to_jiffies(sd->balance_interval);
	interval = clamp(interval, 1UL, max_load_balance_interval);
	sdg->sgc->next_update = jiffies + interval;

	if (!child) {
		update_cpu_capacity(sd, cpu);
		return;
	}

	capacity = 0;
	min_capacity = ULONG_MAX;
	max_capacity = 0;

	if (child->flags & SD_OVERLAP) {
		/*
		 * SD_OVERLAP domains cannot assume that child groups
		 * span the current group.
		 */
		for_each_cpu(cpu, sched_group_span(sdg)) {
			unsigned long cpu_cap = capacity_of(cpu);

			capacity += cpu_cap;
			min_capacity = min(cpu_cap, min_capacity);
			max_capacity = max(cpu_cap, max_capacity);
		}
	} else {
		/*
		 * !SD_OVERLAP domains can assume that child groups
		 * span the current group.
		 */
		group = child->groups;
		do {
			struct sched_group_capacity *sgc = group->sgc;

			capacity += sgc->capacity;
			min_capacity = min(sgc->min_capacity, min_capacity);
			max_capacity = max(sgc->max_capacity, max_capacity);
			group = group->next;
		} while (group != child->groups);
	}

	sdg->sgc->capacity = capacity;
	sdg->sgc->min_capacity = min_capacity;
	sdg->sgc->max_capacity = max_capacity;
}
/*
 * Check whether the capacity of the rq has been noticeably reduced by side
 * activity. The imbalance_pct is used for the threshold.
 * Return true if the capacity is reduced.
 */
static inline int
check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
{
	return ((rq->cpu_capacity * sd->imbalance_pct) <
				(rq->cpu_capacity_orig * 100));
}

/*
 * Check whether a rq has a misfit task and if it looks like we can actually
 * help that task: we can migrate the task to a CPU of higher capacity, or
 * the task's current CPU is heavily pressured.
 */
static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
{
	return rq->misfit_task_load &&
		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
		 check_cpu_capacity(rq, sd));
}
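/*
 * Worked example (illustrative, not from the original source): with
 * imbalance_pct = 117 and cpu_capacity_orig = 1024, check_cpu_capacity()
 * reports a reduction once cpu_capacity * 117 < 1024 * 100, i.e. once more
 * than roughly 15% of the CPU is consumed by RT/DL/IRQ or thermal pressure
 * (cpu_capacity below ~875).
 */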
/*
 * Group imbalance indicates (and tries to solve) the problem where balancing
 * groups is inadequate due to ->cpus_ptr constraints.
 *
 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
 * Something like:
 *
 *	{ 0 1 2 3 } { 4 5 6 7 }
 *	        *     * * *
 *
 * If we were to balance group-wise we'd place two tasks in the first group and
 * two tasks in the second group. Clearly this is undesired as it will overload
 * cpu 3 and leave one of the CPUs in the second group unused.
 *
 * The current solution to this issue is detecting the skew in the first group
 * by noticing the lower domain failed to reach balance and had difficulty
 * moving tasks due to affinity constraints.
 *
 * When this is so detected; this group becomes a candidate for busiest; see
 * update_sd_pick_busiest(). And calculate_imbalance() and
 * find_busiest_group() avoid some of the usual balance conditions to allow it
 * to create an effective group imbalance.
 *
 * This is a somewhat tricky proposition since the next run might not find the
 * group imbalance and decide the groups need to be balanced again. A most
 * subtle and fragile situation.
 */
static inline int sg_imbalanced(struct sched_group *group)
{
	return group->sgc->imbalance;
}
/*
 * group_has_capacity returns true if the group has spare capacity that could
 * be used by some tasks.
 * We consider that a group has spare capacity if the number of tasks is
 * smaller than the number of CPUs or if the utilization is lower than the
 * available capacity for CFS tasks.
 * For the latter, we use a threshold to stabilize the state, to take into
 * account the variance of the tasks' load and to return true only if the
 * available capacity is meaningful for the load balancer.
 * As an example, an available capacity of 1% can appear but it doesn't bring
 * any benefit for the load balance.
 */
static inline bool
group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
	if (sgs->sum_nr_running < sgs->group_weight)
		return true;

	if ((sgs->group_capacity * imbalance_pct) <
			(sgs->group_runnable * 100))
		return false;

	if ((sgs->group_capacity * 100) >
			(sgs->group_util * imbalance_pct))
		return true;

	return false;
}
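/*
 * Worked example (illustrative, not from the original source): a 4-CPU
 * group with group_capacity = 4096, group_util = 3000, group_runnable =
 * 3100 and imbalance_pct = 117 that already runs 4 tasks fails the first
 * check, passes the runnable check (4096 * 117 > 3100 * 100), and still
 * reports spare capacity because 4096 * 100 = 409600 > 3000 * 117 = 351000.
 */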
/*
 *  group_is_overloaded returns true if the group has more tasks than it can
 *  handle.
 *  group_is_overloaded is not equal to !group_has_capacity because a group
 *  with the exact right number of tasks has no more spare capacity but is not
 *  overloaded, so both group_has_capacity and group_is_overloaded return
 *  false.
 */
static inline bool
group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
	if (sgs->sum_nr_running <= sgs->group_weight)
		return false;

	if ((sgs->group_capacity * 100) <
			(sgs->group_util * imbalance_pct))
		return true;

	if ((sgs->group_capacity * imbalance_pct) <
			(sgs->group_runnable * 100))
		return true;

	return false;
}
static inline enum
group_type group_classify(unsigned int imbalance_pct,
			  struct sched_group *group,
			  struct sg_lb_stats *sgs)
{
	if (group_is_overloaded(imbalance_pct, sgs))
		return group_overloaded;

	if (sg_imbalanced(group))
		return group_imbalanced;

	if (sgs->group_asym_packing)
		return group_asym_packing;

	if (sgs->group_misfit_task_load)
		return group_misfit_task;

	if (!group_has_capacity(imbalance_pct, sgs))
		return group_fully_busy;

	return group_has_spare;
}
9330 * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks
9331 * @dst_cpu: Destination CPU of the load balancing
9332 * @sds: Load-balancing data with statistics of the local group
9333 * @sgs: Load-balancing statistics of the candidate busiest group
9334 * @sg: The candidate busiest group
9336 * Check the state of the SMT siblings of both @sds::local and @sg and decide
9337 * if @dst_cpu can pull tasks.
9339 * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of
9340 * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks
9341 * only if @dst_cpu has higher priority.
9343 * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more
9344 * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority.
9345 * Bigger imbalances in the number of busy CPUs will be dealt with in
9346 * update_sd_pick_busiest().
9348 * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
9349 * of @dst_cpu are idle and @sg has lower priority.
9351 * Return: true if @dst_cpu can pull tasks, false otherwise.
9353 static bool asym_smt_can_pull_tasks(int dst_cpu
, struct sd_lb_stats
*sds
,
9354 struct sg_lb_stats
*sgs
,
9355 struct sched_group
*sg
)
9357 #ifdef CONFIG_SCHED_SMT
9358 bool local_is_smt
, sg_is_smt
;
9361 local_is_smt
= sds
->local
->flags
& SD_SHARE_CPUCAPACITY
;
9362 sg_is_smt
= sg
->flags
& SD_SHARE_CPUCAPACITY
;
9364 sg_busy_cpus
= sgs
->group_weight
- sgs
->idle_cpus
;
9366 if (!local_is_smt
) {
9368 * If we are here, @dst_cpu is idle and does not have SMT
9369 * siblings. Pull tasks if candidate group has two or more
9372 if (sg_busy_cpus
>= 2) /* implies sg_is_smt */
9376 * @dst_cpu does not have SMT siblings. @sg may have SMT
9377 * siblings and only one is busy. In such case, @dst_cpu
9378 * can help if it has higher priority and is idle (i.e.,
9379 * it has no running tasks).
9381 return sched_asym_prefer(dst_cpu
, sg
->asym_prefer_cpu
);
9384 /* @dst_cpu has SMT siblings. */
9387 int local_busy_cpus
= sds
->local
->group_weight
-
9388 sds
->local_stat
.idle_cpus
;
9389 int busy_cpus_delta
= sg_busy_cpus
- local_busy_cpus
;
9391 if (busy_cpus_delta
== 1)
9392 return sched_asym_prefer(dst_cpu
, sg
->asym_prefer_cpu
);
9398 * @sg does not have SMT siblings. Ensure that @sds::local does not end
9399 * up with more than one busy SMT sibling and only pull tasks if there
9400 * are not busy CPUs (i.e., no CPU has running tasks).
9402 if (!sds
->local_stat
.sum_nr_running
)
9403 return sched_asym_prefer(dst_cpu
, sg
->asym_prefer_cpu
);
9407 /* Always return false so that callers deal with non-SMT cases. */
9413 sched_asym(struct lb_env
*env
, struct sd_lb_stats
*sds
, struct sg_lb_stats
*sgs
,
9414 struct sched_group
*group
)
9416 /* Only do SMT checks if either local or candidate have SMT siblings */
9417 if ((sds
->local
->flags
& SD_SHARE_CPUCAPACITY
) ||
9418 (group
->flags
& SD_SHARE_CPUCAPACITY
))
9419 return asym_smt_can_pull_tasks(env
->dst_cpu
, sds
, sgs
, group
);
9421 return sched_asym_prefer(env
->dst_cpu
, group
->asym_prefer_cpu
);
9425 sched_reduced_capacity(struct rq
*rq
, struct sched_domain
*sd
)
9428 * When there is more than 1 task, the group_overloaded case already
9429 * takes care of cpu with reduced capacity
9431 if (rq
->cfs
.h_nr_running
!= 1)
9434 return check_cpu_capacity(rq
, sd
);
9438 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
9439 * @env: The load balancing environment.
9440 * @sds: Load-balancing data with statistics of the local group.
9441 * @group: sched_group whose statistics are to be updated.
9442 * @sgs: variable to hold the statistics for this group.
9443 * @sg_status: Holds flag indicating the status of the sched_group
9445 static inline void update_sg_lb_stats(struct lb_env
*env
,
9446 struct sd_lb_stats
*sds
,
9447 struct sched_group
*group
,
9448 struct sg_lb_stats
*sgs
,
9451 int i
, nr_running
, local_group
;
9453 memset(sgs
, 0, sizeof(*sgs
));
9455 local_group
= group
== sds
->local
;
9457 for_each_cpu_and(i
, sched_group_span(group
), env
->cpus
) {
9458 struct rq
*rq
= cpu_rq(i
);
9459 unsigned long load
= cpu_load(rq
);
9461 sgs
->group_load
+= load
;
9462 sgs
->group_util
+= cpu_util_cfs(i
);
9463 sgs
->group_runnable
+= cpu_runnable(rq
);
9464 sgs
->sum_h_nr_running
+= rq
->cfs
.h_nr_running
;
9466 nr_running
= rq
->nr_running
;
9467 sgs
->sum_nr_running
+= nr_running
;
9470 *sg_status
|= SG_OVERLOAD
;
9472 if (cpu_overutilized(i
))
9473 *sg_status
|= SG_OVERUTILIZED
;
9475 #ifdef CONFIG_NUMA_BALANCING
9476 sgs
->nr_numa_running
+= rq
->nr_numa_running
;
9477 sgs
->nr_preferred_running
+= rq
->nr_preferred_running
;
9480 * No need to call idle_cpu() if nr_running is not 0
9482 if (!nr_running
&& idle_cpu(i
)) {
9484 /* Idle cpu can't have misfit task */
9491 if (env
->sd
->flags
& SD_ASYM_CPUCAPACITY
) {
9492 /* Check for a misfit task on the cpu */
9493 if (sgs
->group_misfit_task_load
< rq
->misfit_task_load
) {
9494 sgs
->group_misfit_task_load
= rq
->misfit_task_load
;
9495 *sg_status
|= SG_OVERLOAD
;
9497 } else if ((env
->idle
!= CPU_NOT_IDLE
) &&
9498 sched_reduced_capacity(rq
, env
->sd
)) {
9499 /* Check for a task running on a CPU with reduced capacity */
9500 if (sgs
->group_misfit_task_load
< load
)
9501 sgs
->group_misfit_task_load
= load
;
9505 sgs
->group_capacity
= group
->sgc
->capacity
;
9507 sgs
->group_weight
= group
->group_weight
;
9509 /* Check if dst CPU is idle and preferred to this group */
9510 if (!local_group
&& env
->sd
->flags
& SD_ASYM_PACKING
&&
9511 env
->idle
!= CPU_NOT_IDLE
&& sgs
->sum_h_nr_running
&&
9512 sched_asym(env
, sds
, sgs
, group
)) {
9513 sgs
->group_asym_packing
= 1;
9516 sgs
->group_type
= group_classify(env
->sd
->imbalance_pct
, group
, sgs
);
9518 /* Computing avg_load makes sense only when group is overloaded */
9519 if (sgs
->group_type
== group_overloaded
)
9520 sgs
->avg_load
= (sgs
->group_load
* SCHED_CAPACITY_SCALE
) /
9521 sgs
->group_capacity
;
9525 * update_sd_pick_busiest - return 1 on busiest group
9526 * @env: The load balancing environment.
9527 * @sds: sched_domain statistics
9528 * @sg: sched_group candidate to be checked for being the busiest
9529 * @sgs: sched_group statistics
9531 * Determine if @sg is a busier group than the previously selected
9534 * Return: %true if @sg is a busier group than the previously selected
9535 * busiest group. %false otherwise.
9537 static bool update_sd_pick_busiest(struct lb_env
*env
,
9538 struct sd_lb_stats
*sds
,
9539 struct sched_group
*sg
,
9540 struct sg_lb_stats
*sgs
)
9542 struct sg_lb_stats
*busiest
= &sds
->busiest_stat
;
9544 /* Make sure that there is at least one task to pull */
9545 if (!sgs
->sum_h_nr_running
)
9549 * Don't try to pull misfit tasks we can't help.
9550 * We can use max_capacity here as reduction in capacity on some
9551 * CPUs in the group should either be possible to resolve
9552 * internally or be covered by avg_load imbalance (eventually).
9554 if ((env
->sd
->flags
& SD_ASYM_CPUCAPACITY
) &&
9555 (sgs
->group_type
== group_misfit_task
) &&
9556 (!capacity_greater(capacity_of(env
->dst_cpu
), sg
->sgc
->max_capacity
) ||
9557 sds
->local_stat
.group_type
!= group_has_spare
))
9560 if (sgs
->group_type
> busiest
->group_type
)
9563 if (sgs
->group_type
< busiest
->group_type
)
9567 * The candidate and the current busiest group are the same type of
9568 * group. Let check which one is the busiest according to the type.
9571 switch (sgs
->group_type
) {
9572 case group_overloaded
:
9573 /* Select the overloaded group with highest avg_load. */
9574 if (sgs
->avg_load
<= busiest
->avg_load
)
9578 case group_imbalanced
:
9580 * Select the 1st imbalanced group as we don't have any way to
9581 * choose one more than another.
9585 case group_asym_packing
:
9586 /* Prefer to move from lowest priority CPU's work */
9587 if (sched_asym_prefer(sg
->asym_prefer_cpu
, sds
->busiest
->asym_prefer_cpu
))
9591 case group_misfit_task
:
9593 * If we have more than one misfit sg go with the biggest
9596 if (sgs
->group_misfit_task_load
< busiest
->group_misfit_task_load
)
9600 case group_fully_busy
:
9602 * Select the fully busy group with highest avg_load. In
9603 * theory, there is no need to pull task from such kind of
9604 * group because tasks have all compute capacity that they need
9605 * but we can still improve the overall throughput by reducing
9606 * contention when accessing shared HW resources.
9608 * XXX for now avg_load is not computed and always 0 so we
9609 * select the 1st one.
9611 if (sgs
->avg_load
<= busiest
->avg_load
)
9615 case group_has_spare
:
9617 * Select not overloaded group with lowest number of idle cpus
9618 * and highest number of running tasks. We could also compare
9619 * the spare capacity which is more stable but it can end up
9620 * that the group has less spare capacity but finally more idle
9621 * CPUs which means less opportunity to pull tasks.
9623 if (sgs
->idle_cpus
> busiest
->idle_cpus
)
9625 else if ((sgs
->idle_cpus
== busiest
->idle_cpus
) &&
9626 (sgs
->sum_nr_running
<= busiest
->sum_nr_running
))
9633 * Candidate sg has no more than one task per CPU and has higher
9634 * per-CPU capacity. Migrating tasks to less capable CPUs may harm
9635 * throughput. Maximize throughput, power/energy consequences are not
9638 if ((env
->sd
->flags
& SD_ASYM_CPUCAPACITY
) &&
9639 (sgs
->group_type
<= group_fully_busy
) &&
9640 (capacity_greater(sg
->sgc
->min_capacity
, capacity_of(env
->dst_cpu
))))
9646 #ifdef CONFIG_NUMA_BALANCING
9647 static inline enum fbq_type
fbq_classify_group(struct sg_lb_stats
*sgs
)
9649 if (sgs
->sum_h_nr_running
> sgs
->nr_numa_running
)
9651 if (sgs
->sum_h_nr_running
> sgs
->nr_preferred_running
)
9656 static inline enum fbq_type
fbq_classify_rq(struct rq
*rq
)
9658 if (rq
->nr_running
> rq
->nr_numa_running
)
9660 if (rq
->nr_running
> rq
->nr_preferred_running
)
9665 static inline enum fbq_type
fbq_classify_group(struct sg_lb_stats
*sgs
)
9670 static inline enum fbq_type
fbq_classify_rq(struct rq
*rq
)
9674 #endif /* CONFIG_NUMA_BALANCING */
9680 * task_running_on_cpu - return 1 if @p is running on @cpu.
9683 static unsigned int task_running_on_cpu(int cpu
, struct task_struct
*p
)
9685 /* Task has no contribution or is new */
9686 if (cpu
!= task_cpu(p
) || !READ_ONCE(p
->se
.avg
.last_update_time
))
9689 if (task_on_rq_queued(p
))
9696 * idle_cpu_without - would a given CPU be idle without p ?
9697 * @cpu: the processor on which idleness is tested.
9698 * @p: task which should be ignored.
9700 * Return: 1 if the CPU would be idle. 0 otherwise.
9702 static int idle_cpu_without(int cpu
, struct task_struct
*p
)
9704 struct rq
*rq
= cpu_rq(cpu
);
9706 if (rq
->curr
!= rq
->idle
&& rq
->curr
!= p
)
9710 * rq->nr_running can't be used but an updated version without the
9711 * impact of p on cpu must be used instead. The updated nr_running
9712 * be computed and tested before calling idle_cpu_without().
9716 if (rq
->ttwu_pending
)
9724 * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
9725 * @sd: The sched_domain level to look for idlest group.
9726 * @group: sched_group whose statistics are to be updated.
9727 * @sgs: variable to hold the statistics for this group.
9728 * @p: The task for which we look for the idlest group/CPU.
9730 static inline void update_sg_wakeup_stats(struct sched_domain
*sd
,
9731 struct sched_group
*group
,
9732 struct sg_lb_stats
*sgs
,
9733 struct task_struct
*p
)
9737 memset(sgs
, 0, sizeof(*sgs
));
9739 /* Assume that task can't fit any CPU of the group */
9740 if (sd
->flags
& SD_ASYM_CPUCAPACITY
)
9741 sgs
->group_misfit_task_load
= 1;
9743 for_each_cpu(i
, sched_group_span(group
)) {
9744 struct rq
*rq
= cpu_rq(i
);
9747 sgs
->group_load
+= cpu_load_without(rq
, p
);
9748 sgs
->group_util
+= cpu_util_without(i
, p
);
9749 sgs
->group_runnable
+= cpu_runnable_without(rq
, p
);
9750 local
= task_running_on_cpu(i
, p
);
9751 sgs
->sum_h_nr_running
+= rq
->cfs
.h_nr_running
- local
;
9753 nr_running
= rq
->nr_running
- local
;
9754 sgs
->sum_nr_running
+= nr_running
;
9757 * No need to call idle_cpu_without() if nr_running is not 0
9759 if (!nr_running
&& idle_cpu_without(i
, p
))
9762 /* Check if task fits in the CPU */
9763 if (sd
->flags
& SD_ASYM_CPUCAPACITY
&&
9764 sgs
->group_misfit_task_load
&&
9765 task_fits_cpu(p
, i
))
9766 sgs
->group_misfit_task_load
= 0;
9770 sgs
->group_capacity
= group
->sgc
->capacity
;
9772 sgs
->group_weight
= group
->group_weight
;
9774 sgs
->group_type
= group_classify(sd
->imbalance_pct
, group
, sgs
);
9777 * Computing avg_load makes sense only when group is fully busy or
9780 if (sgs
->group_type
== group_fully_busy
||
9781 sgs
->group_type
== group_overloaded
)
9782 sgs
->avg_load
= (sgs
->group_load
* SCHED_CAPACITY_SCALE
) /
9783 sgs
->group_capacity
;
9786 static bool update_pick_idlest(struct sched_group
*idlest
,
9787 struct sg_lb_stats
*idlest_sgs
,
9788 struct sched_group
*group
,
9789 struct sg_lb_stats
*sgs
)
9791 if (sgs
->group_type
< idlest_sgs
->group_type
)
9794 if (sgs
->group_type
> idlest_sgs
->group_type
)
9798 * The candidate and the current idlest group are the same type of
9799 * group. Let check which one is the idlest according to the type.
9802 switch (sgs
->group_type
) {
9803 case group_overloaded
:
9804 case group_fully_busy
:
9805 /* Select the group with lowest avg_load. */
9806 if (idlest_sgs
->avg_load
<= sgs
->avg_load
)
9810 case group_imbalanced
:
9811 case group_asym_packing
:
9812 /* Those types are not used in the slow wakeup path */
9815 case group_misfit_task
:
9816 /* Select group with the highest max capacity */
9817 if (idlest
->sgc
->max_capacity
>= group
->sgc
->max_capacity
)
9821 case group_has_spare
:
9822 /* Select group with most idle CPUs */
9823 if (idlest_sgs
->idle_cpus
> sgs
->idle_cpus
)
9826 /* Select group with lowest group_util */
9827 if (idlest_sgs
->idle_cpus
== sgs
->idle_cpus
&&
9828 idlest_sgs
->group_util
<= sgs
->group_util
)
9838 * find_idlest_group() finds and returns the least busy CPU group within the
9841 * Assumes p is allowed on at least one CPU in sd.
9843 static struct sched_group
*
9844 find_idlest_group(struct sched_domain
*sd
, struct task_struct
*p
, int this_cpu
)
9846 struct sched_group
*idlest
= NULL
, *local
= NULL
, *group
= sd
->groups
;
9847 struct sg_lb_stats local_sgs
, tmp_sgs
;
9848 struct sg_lb_stats
*sgs
;
9849 unsigned long imbalance
;
9850 struct sg_lb_stats idlest_sgs
= {
9851 .avg_load
= UINT_MAX
,
9852 .group_type
= group_overloaded
,
9858 /* Skip over this group if it has no CPUs allowed */
9859 if (!cpumask_intersects(sched_group_span(group
),
9863 /* Skip over this group if no cookie matched */
9864 if (!sched_group_cookie_match(cpu_rq(this_cpu
), p
, group
))
9867 local_group
= cpumask_test_cpu(this_cpu
,
9868 sched_group_span(group
));
9877 update_sg_wakeup_stats(sd
, group
, sgs
, p
);
9879 if (!local_group
&& update_pick_idlest(idlest
, &idlest_sgs
, group
, sgs
)) {
9884 } while (group
= group
->next
, group
!= sd
->groups
);
9887 /* There is no idlest group to push tasks to */
9891 /* The local group has been skipped because of CPU affinity */
9896 * If the local group is idler than the selected idlest group
9897 * don't try and push the task.
9899 if (local_sgs
.group_type
< idlest_sgs
.group_type
)
9903 * If the local group is busier than the selected idlest group
9904 * try and push the task.
9906 if (local_sgs
.group_type
> idlest_sgs
.group_type
)
9909 switch (local_sgs
.group_type
) {
9910 case group_overloaded
:
9911 case group_fully_busy
:
9913 /* Calculate allowed imbalance based on load */
9914 imbalance
= scale_load_down(NICE_0_LOAD
) *
9915 (sd
->imbalance_pct
-100) / 100;
9918 * When comparing groups across NUMA domains, it's possible for
9919 * the local domain to be very lightly loaded relative to the
9920 * remote domains but "imbalance" skews the comparison making
9921 * remote CPUs look much more favourable. When considering
9922 * cross-domain, add imbalance to the load on the remote node
9923 * and consider staying local.
9926 if ((sd
->flags
& SD_NUMA
) &&
9927 ((idlest_sgs
.avg_load
+ imbalance
) >= local_sgs
.avg_load
))
9931 * If the local group is less loaded than the selected
9932 * idlest group don't try and push any tasks.
9934 if (idlest_sgs
.avg_load
>= (local_sgs
.avg_load
+ imbalance
))
9937 if (100 * local_sgs
.avg_load
<= sd
->imbalance_pct
* idlest_sgs
.avg_load
)
9941 case group_imbalanced
:
9942 case group_asym_packing
:
9943 /* Those type are not used in the slow wakeup path */
9946 case group_misfit_task
:
9947 /* Select group with the highest max capacity */
9948 if (local
->sgc
->max_capacity
>= idlest
->sgc
->max_capacity
)
9952 case group_has_spare
:
9954 if (sd
->flags
& SD_NUMA
) {
9955 int imb_numa_nr
= sd
->imb_numa_nr
;
9956 #ifdef CONFIG_NUMA_BALANCING
9959 * If there is spare capacity at NUMA, try to select
9960 * the preferred node
9962 if (cpu_to_node(this_cpu
) == p
->numa_preferred_nid
)
9965 idlest_cpu
= cpumask_first(sched_group_span(idlest
));
9966 if (cpu_to_node(idlest_cpu
) == p
->numa_preferred_nid
)
9968 #endif /* CONFIG_NUMA_BALANCING */
9970 * Otherwise, keep the task close to the wakeup source
9971 * and improve locality if the number of running tasks
9972 * would remain below threshold where an imbalance is
9973 * allowed while accounting for the possibility the
9974 * task is pinned to a subset of CPUs. If there is a
9975 * real need of migration, periodic load balance will
9978 if (p
->nr_cpus_allowed
!= NR_CPUS
) {
9979 struct cpumask
*cpus
= this_cpu_cpumask_var_ptr(select_rq_mask
);
9981 cpumask_and(cpus
, sched_group_span(local
), p
->cpus_ptr
);
9982 imb_numa_nr
= min(cpumask_weight(cpus
), sd
->imb_numa_nr
);
9985 imbalance
= abs(local_sgs
.idle_cpus
- idlest_sgs
.idle_cpus
);
9986 if (!adjust_numa_imbalance(imbalance
,
9987 local_sgs
.sum_nr_running
+ 1,
9992 #endif /* CONFIG_NUMA */
9995 * Select group with highest number of idle CPUs. We could also
9996 * compare the utilization which is more stable but it can end
9997 * up that the group has less spare capacity but finally more
9998 * idle CPUs which means more opportunity to run task.
10000 if (local_sgs
.idle_cpus
>= idlest_sgs
.idle_cpus
)
10008 static void update_idle_cpu_scan(struct lb_env
*env
,
10009 unsigned long sum_util
)
10011 struct sched_domain_shared
*sd_share
;
10012 int llc_weight
, pct
;
10015 * Update the number of CPUs to scan in LLC domain, which could
10016 * be used as a hint in select_idle_cpu(). The update of sd_share
10017 * could be expensive because it is within a shared cache line.
10018 * So the write of this hint only occurs during periodic load
10019 * balancing, rather than CPU_NEWLY_IDLE, because the latter
10020 * can fire way more frequently than the former.
10022 if (!sched_feat(SIS_UTIL
) || env
->idle
== CPU_NEWLY_IDLE
)
10025 llc_weight
= per_cpu(sd_llc_size
, env
->dst_cpu
);
10026 if (env
->sd
->span_weight
!= llc_weight
)
10029 sd_share
= rcu_dereference(per_cpu(sd_llc_shared
, env
->dst_cpu
));
	/*
	 * The number of CPUs to search drops as sum_util increases, when
	 * sum_util hits 85% or above, the scan stops.
	 * The reason to choose 85% as the threshold is because this is the
	 * imbalance_pct(117) when a LLC sched group is overloaded.
	 *
	 * let y = SCHED_CAPACITY_SCALE - p * x^2                       [1]
	 * and y'= y / SCHED_CAPACITY_SCALE
	 *
	 * x is the ratio of sum_util compared to the CPU capacity:
	 * x = sum_util / (llc_weight * SCHED_CAPACITY_SCALE)
	 * y' is the ratio of CPUs to be scanned in the LLC domain,
	 * and the number of CPUs to scan is calculated by:
	 *
	 * nr_scan = llc_weight * y'                                    [2]
	 *
	 * When x hits the threshold of overloaded, AKA, when
	 * x = 100 / pct, y drops to 0. According to [1],
	 * p should be SCHED_CAPACITY_SCALE * pct^2 / 10000
	 *
	 * Scale x by SCHED_CAPACITY_SCALE:
	 * x' = sum_util / llc_weight;                                  [3]
	 *
	 * and finally [1] becomes:
	 * y = SCHED_CAPACITY_SCALE -
	 *     x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE)            [4]
	 */
	/* equation [3] */
	x = sum_util;
	do_div(x, llc_weight);

	/* equation [4] */
	pct = env->sd->imbalance_pct;
	tmp = x * x * pct * pct;
	do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
	tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
	y = SCHED_CAPACITY_SCALE - tmp;

	/* equation [2] */
	y *= llc_weight;
	do_div(y, SCHED_CAPACITY_SCALE);
	if ((int)y != sd_share->nr_idle_scan)
		WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
}
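/*
 * Worked example (illustrative, not from the original source): for an LLC
 * with llc_weight = 16, imbalance_pct = 117 and sum_util = 8192 (half of
 * the LLC capacity), x' = 512, tmp = 512^2 * 117^2 / (10000 * 1024) ~= 350,
 * y = 674, so nr_idle_scan = 674 * 16 / 1024 ~= 10 CPUs; as utilization
 * approaches ~85% the computed scan depth shrinks to 0.
 */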
10080 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
10081 * @env: The load balancing environment.
10082 * @sds: variable to hold the statistics for this sched_domain.
10085 static inline void update_sd_lb_stats(struct lb_env
*env
, struct sd_lb_stats
*sds
)
10087 struct sched_domain
*child
= env
->sd
->child
;
10088 struct sched_group
*sg
= env
->sd
->groups
;
10089 struct sg_lb_stats
*local
= &sds
->local_stat
;
10090 struct sg_lb_stats tmp_sgs
;
10091 unsigned long sum_util
= 0;
10095 struct sg_lb_stats
*sgs
= &tmp_sgs
;
10098 local_group
= cpumask_test_cpu(env
->dst_cpu
, sched_group_span(sg
));
10103 if (env
->idle
!= CPU_NEWLY_IDLE
||
10104 time_after_eq(jiffies
, sg
->sgc
->next_update
))
10105 update_group_capacity(env
->sd
, env
->dst_cpu
);
10108 update_sg_lb_stats(env
, sds
, sg
, sgs
, &sg_status
);
10114 if (update_sd_pick_busiest(env
, sds
, sg
, sgs
)) {
10116 sds
->busiest_stat
= *sgs
;
10120 /* Now, start updating sd_lb_stats */
10121 sds
->total_load
+= sgs
->group_load
;
10122 sds
->total_capacity
+= sgs
->group_capacity
;
10124 sum_util
+= sgs
->group_util
;
10126 } while (sg
!= env
->sd
->groups
);
10128 /* Tag domain that child domain prefers tasks go to siblings first */
10129 sds
->prefer_sibling
= child
&& child
->flags
& SD_PREFER_SIBLING
;
10132 if (env
->sd
->flags
& SD_NUMA
)
10133 env
->fbq_type
= fbq_classify_group(&sds
->busiest_stat
);
10135 if (!env
->sd
->parent
) {
10136 struct root_domain
*rd
= env
->dst_rq
->rd
;
10138 /* update overload indicator if we are at root domain */
10139 WRITE_ONCE(rd
->overload
, sg_status
& SG_OVERLOAD
);
10141 /* Update over-utilization (tipping point, U >= 0) indicator */
10142 WRITE_ONCE(rd
->overutilized
, sg_status
& SG_OVERUTILIZED
);
10143 trace_sched_overutilized_tp(rd
, sg_status
& SG_OVERUTILIZED
);
10144 } else if (sg_status
& SG_OVERUTILIZED
) {
10145 struct root_domain
*rd
= env
->dst_rq
->rd
;
10147 WRITE_ONCE(rd
->overutilized
, SG_OVERUTILIZED
);
10148 trace_sched_overutilized_tp(rd
, SG_OVERUTILIZED
);
10151 update_idle_cpu_scan(env
, sum_util
);
10155 * calculate_imbalance - Calculate the amount of imbalance present within the
10156 * groups of a given sched_domain during load balance.
10157 * @env: load balance environment
10158 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
10160 static inline void calculate_imbalance(struct lb_env
*env
, struct sd_lb_stats
*sds
)
10162 struct sg_lb_stats
*local
, *busiest
;
10164 local
= &sds
->local_stat
;
10165 busiest
= &sds
->busiest_stat
;
10167 if (busiest
->group_type
== group_misfit_task
) {
10168 if (env
->sd
->flags
& SD_ASYM_CPUCAPACITY
) {
10169 /* Set imbalance to allow misfit tasks to be balanced. */
10170 env
->migration_type
= migrate_misfit
;
10171 env
->imbalance
= 1;
10174 * Set load imbalance to allow moving task from cpu
10175 * with reduced capacity.
10177 env
->migration_type
= migrate_load
;
10178 env
->imbalance
= busiest
->group_misfit_task_load
;
10183 if (busiest
->group_type
== group_asym_packing
) {
10185 * In case of asym capacity, we will try to migrate all load to
10186 * the preferred CPU.
10188 env
->migration_type
= migrate_task
;
10189 env
->imbalance
= busiest
->sum_h_nr_running
;
10193 if (busiest
->group_type
== group_imbalanced
) {
10195 * In the group_imb case we cannot rely on group-wide averages
10196 * to ensure CPU-load equilibrium, try to move any task to fix
10197 * the imbalance. The next load balance will take care of
10198 * balancing back the system.
10200 env
->migration_type
= migrate_task
;
10201 env
->imbalance
= 1;
10206 * Try to use spare capacity of local group without overloading it or
10207 * emptying busiest.
10209 if (local
->group_type
== group_has_spare
) {
10210 if ((busiest
->group_type
> group_fully_busy
) &&
10211 !(env
->sd
->flags
& SD_SHARE_PKG_RESOURCES
)) {
10213 * If busiest is overloaded, try to fill spare
10214 * capacity. This might end up creating spare capacity
10215 * in busiest or busiest still being overloaded but
10216 * there is no simple way to directly compute the
10217 * amount of load to migrate in order to balance the
10220 env
->migration_type
= migrate_util
;
10221 env
->imbalance
= max(local
->group_capacity
, local
->group_util
) -
10225 * In some cases, the group's utilization is max or even
10226 * higher than capacity because of migrations but the
10227 * local CPU is (newly) idle. There is at least one
10228 * waiting task in this overloaded busiest group. Let's
10231 if (env
->idle
!= CPU_NOT_IDLE
&& env
->imbalance
== 0) {
10232 env
->migration_type
= migrate_task
;
10233 env
->imbalance
= 1;
10239 if (busiest
->group_weight
== 1 || sds
->prefer_sibling
) {
10240 unsigned int nr_diff
= busiest
->sum_nr_running
;
10242 * When prefer sibling, evenly spread running tasks on
10245 env
->migration_type
= migrate_task
;
10246 lsub_positive(&nr_diff
, local
->sum_nr_running
);
10247 env
->imbalance
= nr_diff
;
10251 * If there is no overload, we just want to even the number of
10254 env
->migration_type
= migrate_task
;
10255 env
->imbalance
= max_t(long, 0,
10256 (local
->idle_cpus
- busiest
->idle_cpus
));
10260 /* Consider allowing a small imbalance between NUMA groups */
10261 if (env
->sd
->flags
& SD_NUMA
) {
10262 env
->imbalance
= adjust_numa_imbalance(env
->imbalance
,
10263 local
->sum_nr_running
+ 1,
10264 env
->sd
->imb_numa_nr
);
10268 /* Number of tasks to move to restore balance */
10269 env
->imbalance
>>= 1;
10275 * Local is fully busy but has to take more load to relieve the
10278 if (local
->group_type
< group_overloaded
) {
10280 * Local will become overloaded so the avg_load metrics are
10284 local
->avg_load
= (local
->group_load
* SCHED_CAPACITY_SCALE
) /
10285 local
->group_capacity
;
10288 * If the local group is more loaded than the selected
10289 * busiest group don't try to pull any tasks.
10291 if (local
->avg_load
>= busiest
->avg_load
) {
10292 env
->imbalance
= 0;
10296 sds
->avg_load
= (sds
->total_load
* SCHED_CAPACITY_SCALE
) /
10297 sds
->total_capacity
;
10300 * If the local group is more loaded than the average system
10301 * load, don't try to pull any tasks.
10303 if (local
->avg_load
>= sds
->avg_load
) {
10304 env
->imbalance
= 0;
10311 * Both group are or will become overloaded and we're trying to get all
10312 * the CPUs to the average_load, so we don't want to push ourselves
10313 * above the average load, nor do we wish to reduce the max loaded CPU
10314 * below the average load. At the same time, we also don't want to
10315 * reduce the group load below the group capacity. Thus we look for
10316 * the minimum possible imbalance.
10318 env
->migration_type
= migrate_load
;
10319 env
->imbalance
= min(
10320 (busiest
->avg_load
- sds
->avg_load
) * busiest
->group_capacity
,
10321 (sds
->avg_load
- local
->avg_load
) * local
->group_capacity
10322 ) / SCHED_CAPACITY_SCALE
;
/******* find_busiest_group() helpers end here *********************/

/*
 * Decision matrix according to the local and busiest group type:
 *
 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
 * has_spare        nr_idle   balanced   N/A    N/A  balanced   balanced
 * fully_busy       nr_idle   nr_idle    N/A    N/A  balanced   balanced
 * misfit_task      force     N/A        N/A    N/A  N/A        N/A
 * asym_packing     force     force      N/A    N/A  force      force
 * imbalanced       force     force      N/A    N/A  force      force
 * overloaded       force     force      N/A    N/A  force      avg_load
 *
 * N/A :      Not Applicable because already filtered while updating
 *            statistics.
 * balanced : The system is balanced for these 2 groups.
 * force :    Calculate the imbalance as load migration is probably needed.
 * avg_load : Only if imbalance is significant enough.
 * nr_idle :  dst_cpu is not busy and the number of idle CPUs is quite
 *            different in groups.
 */
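/*
 * Illustrative reading of the matrix above (not part of the original
 * source): if the busiest group is overloaded while the local group still
 * has spare capacity, the result is "force" and an imbalance is computed;
 * if the busiest group is merely fully_busy while the local group is
 * already overloaded, the pair is considered "balanced" and nothing is
 * pulled.
 */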
10348 * find_busiest_group - Returns the busiest group within the sched_domain
10349 * if there is an imbalance.
10350 * @env: The load balancing environment.
10352 * Also calculates the amount of runnable load which should be moved
10353 * to restore balance.
10355 * Return: - The busiest group if imbalance exists.
10357 static struct sched_group
*find_busiest_group(struct lb_env
*env
)
10359 struct sg_lb_stats
*local
, *busiest
;
10360 struct sd_lb_stats sds
;
10362 init_sd_lb_stats(&sds
);
10365 * Compute the various statistics relevant for load balancing at
10368 update_sd_lb_stats(env
, &sds
);
10370 /* There is no busy sibling group to pull tasks from */
10374 busiest
= &sds
.busiest_stat
;
10376 /* Misfit tasks should be dealt with regardless of the avg load */
10377 if (busiest
->group_type
== group_misfit_task
)
10378 goto force_balance
;
10380 if (sched_energy_enabled()) {
10381 struct root_domain
*rd
= env
->dst_rq
->rd
;
10383 if (rcu_dereference(rd
->pd
) && !READ_ONCE(rd
->overutilized
))
10387 /* ASYM feature bypasses nice load balance check */
10388 if (busiest
->group_type
== group_asym_packing
)
10389 goto force_balance
;
10392 * If the busiest group is imbalanced the below checks don't
10393 * work because they assume all things are equal, which typically
10394 * isn't true due to cpus_ptr constraints and the like.
10396 if (busiest
->group_type
== group_imbalanced
)
10397 goto force_balance
;
10399 local
= &sds
.local_stat
;
10401 * If the local group is busier than the selected busiest group
10402 * don't try and pull any tasks.
10404 if (local
->group_type
> busiest
->group_type
)
10408 * When groups are overloaded, use the avg_load to ensure fairness
10411 if (local
->group_type
== group_overloaded
) {
10413 * If the local group is more loaded than the selected
10414 * busiest group don't try to pull any tasks.
10416 if (local
->avg_load
>= busiest
->avg_load
)
10419 /* XXX broken for overlapping NUMA groups */
10420 sds
.avg_load
= (sds
.total_load
* SCHED_CAPACITY_SCALE
) /
10421 sds
.total_capacity
;
10424 * Don't pull any tasks if this group is already above the
10425 * domain average load.
10427 if (local
->avg_load
>= sds
.avg_load
)
10431 * If the busiest group is more loaded, use imbalance_pct to be
10434 if (100 * busiest
->avg_load
<=
10435 env
->sd
->imbalance_pct
* local
->avg_load
)
10439 /* Try to move all excess tasks to child's sibling domain */
10440 if (sds
.prefer_sibling
&& local
->group_type
== group_has_spare
&&
10441 busiest
->sum_nr_running
> local
->sum_nr_running
+ 1)
10442 goto force_balance
;
10444 if (busiest
->group_type
!= group_overloaded
) {
10445 if (env
->idle
== CPU_NOT_IDLE
)
10447 * If the busiest group is not overloaded (and as a
10448 * result the local one too) but this CPU is already
10449 * busy, let another idle CPU try to pull task.
10453 if (busiest
->group_weight
> 1 &&
10454 local
->idle_cpus
<= (busiest
->idle_cpus
+ 1))
10456 * If the busiest group is not overloaded
10457 * and there is no imbalance between this and busiest
10458 * group wrt idle CPUs, it is balanced. The imbalance
10459 * becomes significant if the diff is greater than 1
10460 * otherwise we might end up to just move the imbalance
10461 * on another group. Of course this applies only if
10462 * there is more than 1 CPU per group.
10466 if (busiest
->sum_h_nr_running
== 1)
10468 * busiest doesn't have any tasks waiting to run
10474 /* Looks like there is an imbalance. Compute it */
10475 calculate_imbalance(env
, &sds
);
10476 return env
->imbalance
? sds
.busiest
: NULL
;
10479 env
->imbalance
= 0;
10484 * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
10486 static struct rq
*find_busiest_queue(struct lb_env
*env
,
10487 struct sched_group
*group
)
10489 struct rq
*busiest
= NULL
, *rq
;
10490 unsigned long busiest_util
= 0, busiest_load
= 0, busiest_capacity
= 1;
10491 unsigned int busiest_nr
= 0;
10494 for_each_cpu_and(i
, sched_group_span(group
), env
->cpus
) {
10495 unsigned long capacity
, load
, util
;
10496 unsigned int nr_running
;
10500 rt
= fbq_classify_rq(rq
);
10503 * We classify groups/runqueues into three groups:
10504 * - regular: there are !numa tasks
10505 * - remote: there are numa tasks that run on the 'wrong' node
10506 * - all: there is no distinction
10508 * In order to avoid migrating ideally placed numa tasks,
10509 * ignore those when there's better options.
10511 * If we ignore the actual busiest queue to migrate another
10512 * task, the next balance pass can still reduce the busiest
10513 * queue by moving tasks around inside the node.
10515 * If we cannot move enough load due to this classification
10516 * the next pass will adjust the group classification and
10517 * allow migration of more tasks.
10519 * Both cases only affect the total convergence complexity.
10521 if (rt
> env
->fbq_type
)
10524 nr_running
= rq
->cfs
.h_nr_running
;
10528 capacity
= capacity_of(i
);
10531 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
10532 * eventually lead to active_balancing high->low capacity.
10533 * Higher per-CPU capacity is considered better than balancing
10536 if (env
->sd
->flags
& SD_ASYM_CPUCAPACITY
&&
10537 !capacity_greater(capacity_of(env
->dst_cpu
), capacity
) &&
10541 /* Make sure we only pull tasks from a CPU of lower priority */
10542 if ((env
->sd
->flags
& SD_ASYM_PACKING
) &&
10543 sched_asym_prefer(i
, env
->dst_cpu
) &&
10547 switch (env
->migration_type
) {
10550 * When comparing with load imbalance, use cpu_load()
10551 * which is not scaled with the CPU capacity.
10553 load
= cpu_load(rq
);
10555 if (nr_running
== 1 && load
> env
->imbalance
&&
10556 !check_cpu_capacity(rq
, env
->sd
))
10560 * For the load comparisons with the other CPUs,
10561 * consider the cpu_load() scaled with the CPU
10562 * capacity, so that the load can be moved away
10563 * from the CPU that is potentially running at a
10566 * Thus we're looking for max(load_i / capacity_i),
10567 * crosswise multiplication to rid ourselves of the
10568 * division works out to:
10569 * load_i * capacity_j > load_j * capacity_i;
10570 * where j is our previous maximum.
10572 if (load
* busiest_capacity
> busiest_load
* capacity
) {
10573 busiest_load
= load
;
10574 busiest_capacity
= capacity
;
10580 util
= cpu_util_cfs(i
);
10583 * Don't try to pull utilization from a CPU with one
10584 * running task. Whatever its utilization, we will fail
10587 if (nr_running
<= 1)
10590 if (busiest_util
< util
) {
10591 busiest_util
= util
;
10597 if (busiest_nr
< nr_running
) {
10598 busiest_nr
= nr_running
;
10603 case migrate_misfit
:
10605 * For ASYM_CPUCAPACITY domains with misfit tasks we
10606 * simply seek the "biggest" misfit task.
10608 if (rq
->misfit_task_load
> busiest_load
) {
10609 busiest_load
= rq
->misfit_task_load
;
/*
 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
 * so long as it is large enough.
 */
#define MAX_PINNED_INTERVAL	512

static inline bool
asym_active_balance(struct lb_env *env)
{
	/*
	 * ASYM_PACKING needs to force migrate tasks from busy but
	 * lower priority CPUs in order to pack all tasks in the
	 * highest priority CPUs.
	 */
	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
	       sched_asym_prefer(env->dst_cpu, env->src_cpu);
}
static inline bool
imbalanced_active_balance(struct lb_env *env)
{
	struct sched_domain *sd = env->sd;

	/*
	 * The imbalanced case covers pinned tasks preventing a fair
	 * distribution of the load on the system, but also the even
	 * distribution of threads on a system with spare capacity.
	 */
	if ((env->migration_type == migrate_task) &&
	    (sd->nr_balance_failed > sd->cache_nice_tries+2))
		return 1;

	return 0;
}
static int need_active_balance(struct lb_env *env)
{
	struct sched_domain *sd = env->sd;

	if (asym_active_balance(env))
		return 1;

	if (imbalanced_active_balance(env))
		return 1;

	/*
	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
	 * It's worth migrating the task if the src_cpu's capacity is reduced
	 * because of other sched_class or IRQs, provided more capacity stays
	 * available on dst_cpu.
	 */
	if ((env->idle != CPU_NOT_IDLE) &&
	    (env->src_rq->cfs.h_nr_running == 1)) {
		if ((check_cpu_capacity(env->src_rq, sd)) &&
		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
			return 1;
	}

	if (env->migration_type == migrate_misfit)
		return 1;

	return 0;
}
);
10687 static int should_we_balance(struct lb_env
*env
)
10689 struct sched_group
*sg
= env
->sd
->groups
;
10693 * Ensure the balancing environment is consistent; can happen
10694 * when the softirq triggers 'during' hotplug.
10696 if (!cpumask_test_cpu(env
->dst_cpu
, env
->cpus
))
10700 * In the newly idle case, we will allow all the CPUs
10701 * to do the newly idle load balance.
10703 * However, we bail out if we already have tasks or a wakeup pending,
10704 * to optimize wakeup latency.
10706 if (env
->idle
== CPU_NEWLY_IDLE
) {
10707 if (env
->dst_rq
->nr_running
> 0 || env
->dst_rq
->ttwu_pending
)
10712 /* Try to find first idle CPU */
10713 for_each_cpu_and(cpu
, group_balance_mask(sg
), env
->cpus
) {
10714 if (!idle_cpu(cpu
))
10717 /* Are we the first idle CPU? */
10718 return cpu
== env
->dst_cpu
;
10721 /* Are we the first CPU of this group ? */
10722 return group_balance_cpu(sg
) == env
->dst_cpu
;
/*
 * Check this_cpu to ensure it is balanced within domain. Attempt to move
 * tasks if there is an imbalance.
 */
static int load_balance(int this_cpu, struct rq *this_rq,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *continue_balancing)
{
	int ld_moved, cur_ld_moved, active_balance = 0;
	struct sched_domain *sd_parent = sd->parent;
	struct sched_group *group;
	struct rq *busiest;
	struct rq_flags rf;
	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
	struct lb_env env = {
		.sd		= sd,
		.dst_cpu	= this_cpu,
		.dst_rq		= this_rq,
		.dst_grpmask	= sched_group_span(sd->groups),
		.idle		= idle,
		.loop_break	= SCHED_NR_MIGRATE_BREAK,
		.cpus		= cpus,
		.fbq_type	= all,
		.tasks		= LIST_HEAD_INIT(env.tasks),
	};

	cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);

	schedstat_inc(sd->lb_count[idle]);

redo:
	if (!should_we_balance(&env)) {
		*continue_balancing = 0;
		goto out_balanced;
	}

	group = find_busiest_group(&env);
	if (!group) {
		schedstat_inc(sd->lb_nobusyg[idle]);
		goto out_balanced;
	}

	busiest = find_busiest_queue(&env, group);
	if (!busiest) {
		schedstat_inc(sd->lb_nobusyq[idle]);
		goto out_balanced;
	}

	WARN_ON_ONCE(busiest == env.dst_rq);

	schedstat_add(sd->lb_imbalance[idle], env.imbalance);

	env.src_cpu = busiest->cpu;
	env.src_rq = busiest;

	ld_moved = 0;
	/* Clear this flag as soon as we find a pullable task */
	env.flags |= LBF_ALL_PINNED;
	if (busiest->nr_running > 1) {
		/*
		 * Attempt to move tasks. If find_busiest_group has found
		 * an imbalance but busiest->nr_running <= 1, the group is
		 * still unbalanced. ld_moved simply stays zero, so it is
		 * correctly treated as an imbalance.
		 */
		env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);

more_balance:
		rq_lock_irqsave(busiest, &rf);
		update_rq_clock(busiest);

		/*
		 * cur_ld_moved - load moved in current iteration
		 * ld_moved     - cumulative load moved across iterations
		 */
		cur_ld_moved = detach_tasks(&env);

		/*
		 * We've detached some tasks from busiest_rq. Every
		 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
		 * unlock busiest->lock, and we are able to be sure
		 * that nobody can manipulate the tasks in parallel.
		 * See task_rq_lock() family for the details.
		 */

		rq_unlock(busiest, &rf);

		if (cur_ld_moved) {
			attach_tasks(&env);
			ld_moved += cur_ld_moved;
		}

		local_irq_restore(rf.flags);

		if (env.flags & LBF_NEED_BREAK) {
			env.flags &= ~LBF_NEED_BREAK;
			/* Stop if we tried all running tasks */
			if (env.loop < busiest->nr_running)
				goto more_balance;
		}

		/*
		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
		 * us and move them to an alternate dst_cpu in our sched_group
		 * where they can run. The upper limit on how many times we
		 * iterate on same src_cpu is dependent on number of CPUs in our
		 * sched_group.
		 *
		 * This changes load balance semantics a bit on who can move
		 * load to a given_cpu. In addition to the given_cpu itself
		 * (or an ilb_cpu acting on its behalf where given_cpu is
		 * nohz-idle), we now have balance_cpu in a position to move
		 * load to given_cpu. In rare situations, this may cause
		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
		 * _independently_ and at _same_ time to move some load to
		 * given_cpu) causing excess load to be moved to given_cpu.
		 * This however should not happen so much in practice and
		 * moreover subsequent load balance cycles should correct the
		 * excess load moved.
		 */
		if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {

			/* Prevent re-selecting dst_cpu via env's CPUs */
			__cpumask_clear_cpu(env.dst_cpu, env.cpus);

			env.dst_rq	 = cpu_rq(env.new_dst_cpu);
			env.dst_cpu	 = env.new_dst_cpu;
			env.flags	&= ~LBF_DST_PINNED;
			env.loop	 = 0;
			env.loop_break	 = SCHED_NR_MIGRATE_BREAK;

			/*
			 * Go back to "more_balance" rather than "redo" since we
			 * need to continue with same src_cpu.
			 */
			goto more_balance;
		}

		/*
		 * We failed to reach balance because of affinity.
		 */
		if (sd_parent) {
			int *group_imbalance = &sd_parent->groups->sgc->imbalance;

			if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
				*group_imbalance = 1;
		}

		/* All tasks on this runqueue were pinned by CPU affinity */
		if (unlikely(env.flags & LBF_ALL_PINNED)) {
			__cpumask_clear_cpu(cpu_of(busiest), cpus);
			/*
			 * Attempting to continue load balancing at the current
			 * sched_domain level only makes sense if there are
			 * active CPUs remaining as possible busiest CPUs to
			 * pull load from which are not contained within the
			 * destination group that is receiving any migrated
			 * load.
			 */
			if (!cpumask_subset(cpus, env.dst_grpmask)) {
				env.loop = 0;
				env.loop_break = SCHED_NR_MIGRATE_BREAK;
				goto redo;
			}
			goto out_all_pinned;
		}
	}

	if (!ld_moved) {
		schedstat_inc(sd->lb_failed[idle]);
		/*
		 * Increment the failure counter only on periodic balance.
		 * We do not want newidle balance, which can be very
		 * frequent, pollute the failure counter causing
		 * excessive cache_hot migrations and active balances.
		 */
		if (idle != CPU_NEWLY_IDLE)
			sd->nr_balance_failed++;

		if (need_active_balance(&env)) {
			unsigned long flags;

			raw_spin_rq_lock_irqsave(busiest, flags);

			/*
			 * Don't kick the active_load_balance_cpu_stop,
			 * if the curr task on busiest CPU can't be
			 * moved to this_cpu:
			 */
			if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
				raw_spin_rq_unlock_irqrestore(busiest, flags);
				goto out_one_pinned;
			}

			/* Record that we found at least one task that could run on this_cpu */
			env.flags &= ~LBF_ALL_PINNED;

			/*
			 * ->active_balance synchronizes accesses to
			 * ->active_balance_work. Once set, it's cleared
			 * only after active load balance is finished.
			 */
			if (!busiest->active_balance) {
				busiest->active_balance = 1;
				busiest->push_cpu = this_cpu;
				active_balance = 1;
			}
			raw_spin_rq_unlock_irqrestore(busiest, flags);

			if (active_balance) {
				stop_one_cpu_nowait(cpu_of(busiest),
					active_load_balance_cpu_stop, busiest,
					&busiest->active_balance_work);
			}
		}
	} else {
		sd->nr_balance_failed = 0;
	}

	if (likely(!active_balance) || need_active_balance(&env)) {
		/* We were unbalanced, so reset the balancing interval */
		sd->balance_interval = sd->min_interval;
	}

	goto out;

out_balanced:
	/*
	 * We reach balance although we may have faced some affinity
	 * constraints. Clear the imbalance flag only if other tasks got
	 * a chance to move and fix the imbalance.
	 */
	if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
		int *group_imbalance = &sd_parent->groups->sgc->imbalance;

		if (*group_imbalance)
			*group_imbalance = 0;
	}

out_all_pinned:
	/*
	 * We reach balance because all tasks are pinned at this level so
	 * we can't migrate them. Let the imbalance flag set so parent level
	 * can try to migrate them.
	 */
	schedstat_inc(sd->lb_balanced[idle]);

	sd->nr_balance_failed = 0;

out_one_pinned:
	ld_moved = 0;

	/*
	 * newidle_balance() disregards balance intervals, so we could
	 * repeatedly reach this code, which would lead to balance_interval
	 * skyrocketing in a short amount of time. Skip the balance_interval
	 * increase logic to avoid that.
	 */
	if (env.idle == CPU_NEWLY_IDLE)
		goto out;

	/* tune up the balancing interval */
	if ((env.flags & LBF_ALL_PINNED &&
	     sd->balance_interval < MAX_PINNED_INTERVAL) ||
	    sd->balance_interval < sd->max_interval)
		sd->balance_interval *= 2;
out:
	return ld_moved;
}
static inline unsigned long
get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
{
	unsigned long interval = sd->balance_interval;

	if (cpu_busy)
		interval *= sd->busy_factor;

	/* scale ms to jiffies */
	interval = msecs_to_jiffies(interval);

	/*
	 * Reduce likelihood of busy balancing at higher domains racing with
	 * balancing at lower domains by preventing their balancing periods
	 * from being multiples of each other.
	 */
	if (cpu_busy)
		interval -= 1;

	interval = clamp(interval, 1UL, max_load_balance_interval);

	return interval;
}
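/*
 * Illustrative example with made-up numbers: for a domain with a 64ms
 * balance_interval and a busy_factor of 16, a busy CPU computes
 * 64 * 16 = 1024ms, which msecs_to_jiffies() turns into 256 jiffies at
 * HZ=250; subtracting 1 makes the busy period unlikely to be an exact
 * multiple of the idle period (16 jiffies), and the final clamp() keeps
 * the result within [1, max_load_balance_interval].
 */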
static inline void
update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
{
	unsigned long interval, next;

	/* used by idle balance, so cpu_busy = 0 */
	interval = get_sd_balance_interval(sd, 0);
	next = sd->last_balance + interval;

	if (time_after(*next_balance, next))
		*next_balance = next;
}
/*
 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
 * running tasks off the busiest CPU onto idle CPUs. It requires at
 * least 1 task to be running on each physical CPU where possible, and
 * avoids physical / logical imbalances.
 */
static int active_load_balance_cpu_stop(void *data)
{
	struct rq *busiest_rq = data;
	int busiest_cpu = cpu_of(busiest_rq);
	int target_cpu = busiest_rq->push_cpu;
	struct rq *target_rq = cpu_rq(target_cpu);
	struct sched_domain *sd;
	struct task_struct *p = NULL;
	struct rq_flags rf;

	rq_lock_irq(busiest_rq, &rf);
	/*
	 * Between queueing the stop-work and running it is a hole in which
	 * CPUs can become inactive. We should not move tasks from or to
	 * inactive CPUs.
	 */
	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
		goto out_unlock;

	/* Make sure the requested CPU hasn't gone down in the meantime: */
	if (unlikely(busiest_cpu != smp_processor_id() ||
		     !busiest_rq->active_balance))
		goto out_unlock;

	/* Is there any task to move? */
	if (busiest_rq->nr_running <= 1)
		goto out_unlock;

	/*
	 * This condition is "impossible", if it occurs
	 * we need to fix it. Originally reported by
	 * Bjorn Helgaas on a 128-CPU setup.
	 */
	WARN_ON_ONCE(busiest_rq == target_rq);

	/* Search for an sd spanning us and the target CPU. */
	rcu_read_lock();
	for_each_domain(target_cpu, sd) {
		if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
			break;
	}

	if (likely(sd)) {
		struct lb_env env = {
			.sd		= sd,
			.dst_cpu	= target_cpu,
			.dst_rq		= target_rq,
			.src_cpu	= busiest_rq->cpu,
			.src_rq		= busiest_rq,
			.idle		= CPU_IDLE,
			.flags		= LBF_ACTIVE_LB,
		};

		schedstat_inc(sd->alb_count);
		update_rq_clock(busiest_rq);

		p = detach_one_task(&env);
		if (p) {
			schedstat_inc(sd->alb_pushed);
			/* Active balancing done, reset the failure counter. */
			sd->nr_balance_failed = 0;
		} else {
			schedstat_inc(sd->alb_failed);
		}
	}
	rcu_read_unlock();
out_unlock:
	busiest_rq->active_balance = 0;
	rq_unlock(busiest_rq, &rf);

	if (p)
		attach_one_task(target_rq, p);

	local_irq_enable();

	return 0;
}
static DEFINE_SPINLOCK(balancing);

/*
 * Scale the max load_balance interval with the number of CPUs in the system.
 * This trades load-balance latency on larger machines for less cross talk.
 */
void update_max_interval(void)
{
	max_load_balance_interval = HZ*num_online_cpus()/10;
}
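/*
 * Illustrative example with made-up numbers: at HZ=250 with 8 CPUs online
 * this gives 250 * 8 / 10 = 200 jiffies (800ms); with 64 CPUs online it
 * grows to 1600 jiffies, so large machines tolerate a longer maximum
 * balance interval in exchange for less cross-CPU balancing traffic.
 */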
static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
{
	if (cost > sd->max_newidle_lb_cost) {
		/*
		 * Track max cost of a domain to make sure to not delay the
		 * next wakeup on the CPU.
		 */
		sd->max_newidle_lb_cost = cost;
		sd->last_decay_max_lb_cost = jiffies;
	} else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) {
		/*
		 * Decay the newidle max times by ~1% per second to ensure that
		 * it is not outdated and the current max cost is actually
		 * shorter.
		 */
		sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256;
		sd->last_decay_max_lb_cost = jiffies;

		return true;
	}

	return false;
}
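/*
 * Illustrative example with made-up numbers: a recorded max_newidle_lb_cost
 * of 100000ns decays by 253/256 once per second, i.e. to ~98828ns after one
 * second, ~94% of the original after five seconds and ~62% after forty, so
 * a one-off expensive newidle balance stops gating the rq->avg_idle
 * comparisons once it has gone stale.
 */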
/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in init_sched_domains.
 */
static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
{
	int continue_balancing = 1;
	int cpu = rq->cpu;
	int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
	unsigned long interval;
	struct sched_domain *sd;
	/* Earliest time when we have to do rebalance again */
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize, need_decay = 0;
	u64 max_cost = 0;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		/*
		 * Decay the newidle max times here because this is a regular
		 * visit to all the domains.
		 */
		need_decay = update_newidle_cost(sd, 0);
		max_cost += sd->max_newidle_lb_cost;

		/*
		 * Stop the load balance at this level. There is another
		 * CPU in our sched group which is doing load balancing more
		 * actively.
		 */
		if (!continue_balancing) {
			if (need_decay)
				continue;
			break;
		}

		interval = get_sd_balance_interval(sd, busy);

		need_serialize = sd->flags & SD_SERIALIZE;
		if (need_serialize) {
			if (!spin_trylock(&balancing))
				goto out;
		}

		if (time_after_eq(jiffies, sd->last_balance + interval)) {
			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
				/*
				 * The LBF_DST_PINNED logic could have changed
				 * env->dst_cpu, so we can't know our idle
				 * state even if we migrated tasks. Update it.
				 */
				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
				busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
			}
			sd->last_balance = jiffies;
			interval = get_sd_balance_interval(sd, busy);
		}
		if (need_serialize)
			spin_unlock(&balancing);
out:
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;
		}
	}
	if (need_decay) {
		/*
		 * Ensure the rq-wide value also decays but keep it at a
		 * reasonable floor to avoid funnies with rq->avg_idle.
		 */
		rq->max_idle_balance_cost =
			max((u64)sysctl_sched_migration_cost, max_cost);
	}
	rcu_read_unlock();

	/*
	 * next_balance will be updated only when there is a need.
	 * When the CPU is attached to a null domain, for example, it will
	 * not be updated.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;

}

static inline int on_null_domain(struct rq *rq)
{
	return unlikely(!rcu_dereference_sched(rq->sd));
}
#ifdef CONFIG_NO_HZ_COMMON
/*
 * idle load balancing details
 * - When one of the busy CPUs notices that there may be an idle rebalancing
 *   needed, it will kick the idle load balancer, which then does idle
 *   load balancing for all the idle CPUs.
 * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED is not
 *   set anywhere yet.
 */

static inline int find_new_ilb(void)
{
	int ilb;
	const struct cpumask *hk_mask;

	hk_mask = housekeeping_cpumask(HK_TYPE_MISC);

	for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {

		if (ilb == smp_processor_id())
			continue;

		if (idle_cpu(ilb))
			return ilb;
	}

	return nr_cpu_ids;
}

/*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
 * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one).
 */
static void kick_ilb(unsigned int flags)
{
	int ilb_cpu;

	/*
	 * Increase nohz.next_balance only if a full ilb is triggered, but
	 * not if we only update stats.
	 */
	if (flags & NOHZ_BALANCE_KICK)
		nohz.next_balance = jiffies+1;

	ilb_cpu = find_new_ilb();

	if (ilb_cpu >= nr_cpu_ids)
		return;

	/*
	 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
	 * the first flag owns it; cleared by nohz_csd_func().
	 */
	flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
	if (flags & NOHZ_KICK_MASK)
		return;

	/*
	 * This way we generate an IPI on the target CPU which
	 * is idle. And the softirq performing nohz idle load balance
	 * will be run before returning from the IPI.
	 */
	smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
}
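/*
 * Illustrative scenario (hypothetical): if two busy CPUs call kick_ilb()
 * for the same ilb_cpu at nearly the same time, the first atomic_fetch_or()
 * sees 0 and goes on to send the IPI, while the second sees a
 * NOHZ_KICK_MASK bit already set in the returned value and bails out, so
 * only one IPI is generated per pending kick.
 */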
/*
 * Current decision point for kicking the idle load balancer in the presence
 * of idle CPUs in the system.
 */
static void nohz_balancer_kick(struct rq *rq)
{
	unsigned long now = jiffies;
	struct sched_domain_shared *sds;
	struct sched_domain *sd;
	int nr_busy, i, cpu = rq->cpu;
	unsigned int flags = 0;

	if (unlikely(rq->idle_balance))
		return;

	/*
	 * We may be recently in ticked or tickless idle mode. At the first
	 * busy tick after returning from idle, we will update the busy stats.
	 */
	nohz_balance_exit_idle(rq);

	/*
	 * None are in tickless mode and hence no need for NOHZ idle load
	 * balancing.
	 */
	if (likely(!atomic_read(&nohz.nr_cpus)))
		return;

	if (READ_ONCE(nohz.has_blocked) &&
	    time_after(now, READ_ONCE(nohz.next_blocked)))
		flags = NOHZ_STATS_KICK;

	if (time_before(now, nohz.next_balance))
		goto out;

	if (rq->nr_running >= 2) {
		flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
		goto out;
	}

	rcu_read_lock();

	sd = rcu_dereference(rq->sd);
	if (sd) {
		/*
		 * If there's a CFS task and the current CPU has reduced
		 * capacity; kick the ILB to see if there's a better CPU to run
		 * on.
		 */
		if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
			goto unlock;
		}
	}

	sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
	if (sd) {
		/*
		 * When ASYM_PACKING; see if there's a more preferred CPU
		 * currently idle; in which case, kick the ILB to move tasks
		 * around.
		 */
		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
			if (sched_asym_prefer(i, cpu)) {
				flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
				goto unlock;
			}
		}
	}

	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
	if (sd) {
		/*
		 * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
		 * to run the misfit task on.
		 */
		if (check_misfit_status(rq, sd)) {
			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
			goto unlock;
		}

		/*
		 * For asymmetric systems, we do not want to nicely balance
		 * cache use, instead we want to embrace asymmetry and only
		 * ensure tasks have enough CPU capacity.
		 *
		 * Skip the LLC logic because it's not relevant in that case.
		 */
		goto unlock;
	}

	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds) {
		/*
		 * If there is an imbalance between LLC domains (IOW we could
		 * increase the overall cache use), we need some less-loaded LLC
		 * domain to pull some load. Likewise, we may need to spread
		 * load within the current LLC domain (e.g. packed SMT cores but
		 * other CPUs are idle). We can't really know from here how busy
		 * the others are - so just get a nohz balance going if it looks
		 * like this LLC domain has tasks we could move.
		 */
		nr_busy = atomic_read(&sds->nr_busy_cpus);
		if (nr_busy > 1) {
			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
			goto unlock;
		}
	}
unlock:
	rcu_read_unlock();
out:
	if (READ_ONCE(nohz.needs_update))
		flags |= NOHZ_NEXT_KICK;

	if (flags)
		kick_ilb(flags);
}
static void set_cpu_sd_state_busy(int cpu)
{
	struct sched_domain *sd;

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_llc, cpu));

	if (!sd || !sd->nohz_idle)
		goto unlock;
	sd->nohz_idle = 0;

	atomic_inc(&sd->shared->nr_busy_cpus);
unlock:
	rcu_read_unlock();
}

void nohz_balance_exit_idle(struct rq *rq)
{
	SCHED_WARN_ON(rq != this_rq());

	if (likely(!rq->nohz_tick_stopped))
		return;

	rq->nohz_tick_stopped = 0;
	cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
	atomic_dec(&nohz.nr_cpus);

	set_cpu_sd_state_busy(rq->cpu);
}

static void set_cpu_sd_state_idle(int cpu)
{
	struct sched_domain *sd;

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_llc, cpu));

	if (!sd || sd->nohz_idle)
		goto unlock;
	sd->nohz_idle = 1;

	atomic_dec(&sd->shared->nr_busy_cpus);
unlock:
	rcu_read_unlock();
}
/*
 * This routine will record that the CPU is going idle with tick stopped.
 * This info will be used in performing idle load balancing in the future.
 */
void nohz_balance_enter_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	SCHED_WARN_ON(cpu != smp_processor_id());

	/* If this CPU is going down, then nothing needs to be done: */
	if (!cpu_active(cpu))
		return;

	/* Spare idle load balancing on CPUs that don't want to be disturbed: */
	if (!housekeeping_cpu(cpu, HK_TYPE_SCHED))
		return;

	/*
	 * Can be set safely without rq->lock held.
	 * If a clear happens, it will have evaluated last additions because
	 * rq->lock is held during the check and the clear.
	 */
	rq->has_blocked_load = 1;

	/*
	 * The tick is still stopped but load could have been added in the
	 * meantime. We set the nohz.has_blocked flag to trigger a check of
	 * the *_avg. The CPU is already part of nohz.idle_cpus_mask so the
	 * clear of nohz.has_blocked can only happen after checking the new
	 * load.
	 */
	if (rq->nohz_tick_stopped)
		goto out;

	/* If we're a completely isolated CPU, we don't play: */
	if (on_null_domain(rq))
		return;

	rq->nohz_tick_stopped = 1;

	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
	atomic_inc(&nohz.nr_cpus);

	/*
	 * Ensures that if nohz_idle_balance() fails to observe our
	 * @idle_cpus_mask store, it must observe the @has_blocked
	 * and @needs_update stores.
	 */
	smp_mb__after_atomic();

	set_cpu_sd_state_idle(cpu);

	WRITE_ONCE(nohz.needs_update, 1);
out:
	/*
	 * Each time a CPU enters idle, we assume that it has blocked load and
	 * enable the periodic update of the load of idle CPUs.
	 */
	WRITE_ONCE(nohz.has_blocked, 1);
}
static bool update_nohz_stats(struct rq *rq)
{
	unsigned int cpu = rq->cpu;

	if (!rq->has_blocked_load)
		return false;

	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
		return false;

	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
		return true;

	update_blocked_averages(cpu);

	return rq->has_blocked_load;
}
/*
 * Internal function that runs load balance for all idle CPUs. The load
 * balance can be a simple update of blocked load or a complete load balance
 * with task movement, depending on the flags.
 */
static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
{
	/* Earliest time when we have to do rebalance again */
	unsigned long now = jiffies;
	unsigned long next_balance = now + 60*HZ;
	bool has_blocked_load = false;
	int update_next_balance = 0;
	int this_cpu = this_rq->cpu;
	int balance_cpu;
	struct rq *rq;

	SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);

	/*
	 * We assume there will be no idle load after this update and clear
	 * the has_blocked flag. If a cpu enters idle in the mean time, it will
	 * set the has_blocked flag and trigger another update of idle load.
	 * Because a cpu that becomes idle, is added to idle_cpus_mask before
	 * setting the flag, we are sure to not clear the state and not
	 * check the load of an idle cpu.
	 *
	 * Same applies to idle_cpus_mask vs needs_update.
	 */
	if (flags & NOHZ_STATS_KICK)
		WRITE_ONCE(nohz.has_blocked, 0);
	if (flags & NOHZ_NEXT_KICK)
		WRITE_ONCE(nohz.needs_update, 0);

	/*
	 * Ensures that if we miss the CPU, we must see the has_blocked
	 * store from nohz_balance_enter_idle().
	 */
	smp_mb();

	/*
	 * Start with the next CPU after this_cpu so we will end with this_cpu
	 * and give other idle CPUs a chance to pull load.
	 */
	for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
		if (!idle_cpu(balance_cpu))
			continue;

		/*
		 * If this CPU gets work to do, stop the load balancing
		 * work being done for other CPUs. Next load
		 * balancing owner will pick it up.
		 */
		if (need_resched()) {
			if (flags & NOHZ_STATS_KICK)
				has_blocked_load = true;
			if (flags & NOHZ_NEXT_KICK)
				WRITE_ONCE(nohz.needs_update, 1);
			goto abort;
		}

		rq = cpu_rq(balance_cpu);

		if (flags & NOHZ_STATS_KICK)
			has_blocked_load |= update_nohz_stats(rq);

		/*
		 * If time for next balance is due,
		 * do the balance.
		 */
		if (time_after_eq(jiffies, rq->next_balance)) {
			struct rq_flags rf;

			rq_lock_irqsave(rq, &rf);
			update_rq_clock(rq);
			rq_unlock_irqrestore(rq, &rf);

			if (flags & NOHZ_BALANCE_KICK)
				rebalance_domains(rq, CPU_IDLE);
		}

		if (time_after(next_balance, rq->next_balance)) {
			next_balance = rq->next_balance;
			update_next_balance = 1;
		}
	}

	/*
	 * next_balance will be updated only when there is a need.
	 * When the CPU is attached to a null domain, for example, it will
	 * not be updated.
	 */
	if (likely(update_next_balance))
		nohz.next_balance = next_balance;

	if (flags & NOHZ_STATS_KICK)
		WRITE_ONCE(nohz.next_blocked,
			   now + msecs_to_jiffies(LOAD_AVG_PERIOD));

abort:
	/* There is still blocked load, enable periodic update */
	if (has_blocked_load)
		WRITE_ONCE(nohz.has_blocked, 1);
}
/*
 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
 */
static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
	unsigned int flags = this_rq->nohz_idle_balance;

	if (!flags)
		return false;

	this_rq->nohz_idle_balance = 0;

	if (idle != CPU_IDLE)
		return false;

	_nohz_idle_balance(this_rq, flags);

	return true;
}

/*
 * Check if we need to run the ILB for updating blocked load before entering
 * idle state.
 */
void nohz_run_idle_balance(int cpu)
{
	unsigned int flags;

	flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));

	/*
	 * Update the blocked load only if no SCHED_SOFTIRQ is about to happen
	 * (ie NOHZ_STATS_KICK set) and will do the same.
	 */
	if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
		_nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK);
}
static void nohz_newidle_balance(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu;

	/*
	 * This CPU doesn't want to be disturbed by scheduler
	 * housekeeping
	 */
	if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED))
		return;

	/* Will wake up very soon. No time for doing anything else */
	if (this_rq->avg_idle < sysctl_sched_migration_cost)
		return;

	/* Don't need to update blocked load of idle CPUs */
	if (!READ_ONCE(nohz.has_blocked) ||
	    time_before(jiffies, READ_ONCE(nohz.next_blocked)))
		return;

	/*
	 * Set the need to trigger ILB in order to update blocked load
	 * before entering idle state.
	 */
	atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
}

#else /* !CONFIG_NO_HZ_COMMON */
static inline void nohz_balancer_kick(struct rq *rq) { }

static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
{
	return false;
}

static inline void nohz_newidle_balance(struct rq *this_rq) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
 * newidle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 *
 * Returns:
 *   < 0 - we released the lock and there are !fair tasks present
 *     0 - failed, no new tasks
 *   > 0 - success, new (fair) tasks present
 */
static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
{
	unsigned long next_balance = jiffies + HZ;
	int this_cpu = this_rq->cpu;
	u64 t0, t1, curr_cost = 0;
	struct sched_domain *sd;
	int pulled_task = 0;

	update_misfit_status(NULL, this_rq);

	/*
	 * There is a task waiting to run. No need to search for one.
	 * Return 0; the task will be enqueued when switching to idle.
	 */
	if (this_rq->ttwu_pending)
		return 0;

	/*
	 * We must set idle_stamp _before_ calling idle_balance(), such that we
	 * measure the duration of idle_balance() as idle time.
	 */
	this_rq->idle_stamp = rq_clock(this_rq);

	/*
	 * Do not pull tasks towards !active CPUs...
	 */
	if (!cpu_active(this_cpu))
		return 0;

	/*
	 * This is OK, because current is on_cpu, which avoids it being picked
	 * for load-balance and preemption/IRQs are still disabled avoiding
	 * further scheduler activity on it and we're being very careful to
	 * re-start the picking loop.
	 */
	rq_unpin_lock(this_rq, rf);

	rcu_read_lock();
	sd = rcu_dereference_check_sched_domain(this_rq->sd);

	if (!READ_ONCE(this_rq->rd->overload) ||
	    (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) {

		if (sd)
			update_next_balance(sd, &next_balance);
		rcu_read_unlock();

		goto out;
	}
	rcu_read_unlock();

	raw_spin_rq_unlock(this_rq);

	t0 = sched_clock_cpu(this_cpu);
	update_blocked_averages(this_cpu);

	rcu_read_lock();
	for_each_domain(this_cpu, sd) {
		int continue_balancing = 1;
		u64 domain_cost;

		update_next_balance(sd, &next_balance);

		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
			break;

		if (sd->flags & SD_BALANCE_NEWIDLE) {

			pulled_task = load_balance(this_cpu, this_rq,
						   sd, CPU_NEWLY_IDLE,
						   &continue_balancing);

			t1 = sched_clock_cpu(this_cpu);
			domain_cost = t1 - t0;
			update_newidle_cost(sd, domain_cost);

			curr_cost += domain_cost;
			t0 = t1;
		}

		/*
		 * Stop searching for tasks to pull if there are
		 * now runnable tasks on this rq.
		 */
		if (pulled_task || this_rq->nr_running > 0 ||
		    this_rq->ttwu_pending)
			break;
	}
	rcu_read_unlock();

	raw_spin_rq_lock(this_rq);

	if (curr_cost > this_rq->max_idle_balance_cost)
		this_rq->max_idle_balance_cost = curr_cost;

	/*
	 * While browsing the domains, we released the rq lock, a task could
	 * have been enqueued in the meantime. Since we're not going idle,
	 * pretend we pulled a task.
	 */
	if (this_rq->cfs.h_nr_running && !pulled_task)
		pulled_task = 1;

	/* Is there a task of a high priority class? */
	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
		pulled_task = -1;

out:
	/* Move the next balance forward */
	if (time_after(this_rq->next_balance, next_balance))
		this_rq->next_balance = next_balance;

	if (pulled_task)
		this_rq->idle_stamp = 0;
	else
		nohz_newidle_balance(this_rq);

	rq_repin_lock(this_rq, rf);

	return pulled_task;
}
/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
{
	struct rq *this_rq = this_rq();
	enum cpu_idle_type idle = this_rq->idle_balance ?
						CPU_IDLE : CPU_NOT_IDLE;

	/*
	 * If this CPU has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle CPUs whose ticks are
	 * stopped. Do nohz_idle_balance *before* rebalance_domains to
	 * give the idle CPUs a chance to load balance. Else we may
	 * load balance only within the local sched_domain hierarchy
	 * and abort nohz_idle_balance altogether if we pull some load.
	 */
	if (nohz_idle_balance(this_rq, idle))
		return;

	/* normal load balance */
	update_blocked_averages(this_rq->cpu);
	rebalance_domains(this_rq, idle);
}

/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq)
{
	/*
	 * Don't need to rebalance while attached to NULL domain or
	 * runqueue CPU is not active
	 */
	if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
		return;

	if (time_after_eq(jiffies, rq->next_balance))
		raise_softirq(SCHED_SOFTIRQ);

	nohz_balancer_kick(rq);
}

static void rq_online_fair(struct rq *rq)
{
	update_sysctl();

	update_runtime_enabled(rq);
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();

	/* Ensure any throttled groups are reachable by pick_next_task */
	unthrottle_offline_cfs_rqs(rq);
}

#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_CORE
static inline bool
__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
{
	u64 slice = sched_slice(cfs_rq_of(se), se);
	u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;

	return (rtime * min_nr_tasks > slice);
}

#define MIN_NR_TASKS_DURING_FORCEIDLE	2
static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
{
	if (!sched_core_enabled(rq))
		return;

	/*
	 * If runqueue has only one task which used up its slice and
	 * if the sibling is forced idle, then trigger schedule to
	 * give forced idle task a chance.
	 *
	 * sched_slice() considers only this active rq and it gets the
	 * whole slice. But during force idle, we have siblings acting
	 * like a single runqueue and hence we need to consider runnable
	 * tasks on this CPU and the forced idle CPU. Ideally, we should
	 * go through the forced idle rq, but that would be a perf hit.
	 * We can assume that the forced idle CPU has at least
	 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
	 * if we need to give up the CPU.
	 */
	if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
		resched_curr(rq);
}
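/*
 * Illustrative example with made-up numbers: with a sched_slice() of 3ms
 * and MIN_NR_TASKS_DURING_FORCEIDLE == 2, a task that has run 2ms since it
 * was picked gives rtime * min_nr_tasks = 4ms > 3ms, so
 * __entity_slice_used() returns true and, if a sibling is forced idle,
 * task_tick_core() reschedules to give the forced-idle tasks a chance.
 */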
/*
 * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
 */
static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
			 bool forceidle)
{
	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		if (forceidle) {
			if (cfs_rq->forceidle_seq == fi_seq)
				break;
			cfs_rq->forceidle_seq = fi_seq;
		}

		cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
	}
}
void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
{
	struct sched_entity *se = &p->se;

	if (p->sched_class != &fair_sched_class)
		return;

	se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
}

bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
			bool in_fi)
{
	struct rq *rq = task_rq(a);
	const struct sched_entity *sea = &a->se;
	const struct sched_entity *seb = &b->se;
	struct cfs_rq *cfs_rqa;
	struct cfs_rq *cfs_rqb;
	s64 delta;

	SCHED_WARN_ON(task_rq(b)->core != rq->core);

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 * Find an se in the hierarchy for tasks a and b, such that the se's
	 * are immediate siblings.
	 */
	while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
		int sea_depth = sea->depth;
		int seb_depth = seb->depth;

		if (sea_depth >= seb_depth)
			sea = parent_entity(sea);
		if (sea_depth <= seb_depth)
			seb = parent_entity(seb);
	}

	se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
	se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);

	cfs_rqa = sea->cfs_rq;
	cfs_rqb = seb->cfs_rq;
#else
	cfs_rqa = &task_rq(a)->cfs;
	cfs_rqb = &task_rq(b)->cfs;
#endif

	/*
	 * Find delta after normalizing se's vruntime with its cfs_rq's
	 * min_vruntime_fi, which would have been updated in prior calls
	 * to se_fi_update().
	 */
	delta = (s64)(sea->vruntime - seb->vruntime) +
		(s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);

	return delta > 0;
}
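/*
 * Illustrative example with made-up numbers: if sea has vruntime 105 on a
 * cfs_rq whose min_vruntime_fi is 100, and seb has vruntime 52 on a cfs_rq
 * whose min_vruntime_fi is 50, the normalized lags are 5 and 2, so
 * delta = (105 - 52) + (50 - 100) = 3 > 0 and cfs_prio_less() reports that
 * a has consumed more vruntime, i.e. has lower priority, than b.
 */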
#else /* !CONFIG_SCHED_CORE */
static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
#endif /* CONFIG_SCHED_CORE */
/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}

	if (static_branch_unlikely(&sched_numa_balancing))
		task_tick_numa(rq, curr);

	update_misfit_status(curr, rq);
	update_overutilized_status(task_rq(curr));

	task_tick_core(rq, curr);
}
/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se, *curr;
	struct rq *rq = this_rq();
	struct rq_flags rf;

	rq_lock(rq, &rf);
	update_rq_clock(rq);

	cfs_rq = task_cfs_rq(current);
	curr = cfs_rq->curr;
	if (curr) {
		update_curr(cfs_rq);
		se->vruntime = curr->vruntime;
	}
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_curr(rq);
	}

	se->vruntime -= cfs_rq->min_vruntime;
	rq_unlock(rq, &rf);
}
/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!task_on_rq_queued(p))
		return;

	if (rq->cfs.nr_running == 1)
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (task_current(rq, p)) {
		if (p->prio > oldprio)
			resched_curr(rq);
	} else
		check_preempt_curr(rq, p, 0);
}
static inline bool vruntime_normalized(struct task_struct *p)
{
	struct sched_entity *se = &p->se;

	/*
	 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
	 * the dequeue_entity(.flags=0) will already have normalized the
	 * vruntime.
	 */
	if (p->on_rq)
		return true;

	/*
	 * When !on_rq, vruntime of the task has usually NOT been normalized.
	 * But there are some cases where it has already been normalized:
	 *
	 * - A forked child which is waiting for being woken up by
	 *   wake_up_new_task().
	 * - A task which has been woken up by try_to_wake_up() and
	 *   waiting for actually being woken up by sched_ttwu_pending().
	 */
	if (!se->sum_exec_runtime ||
	    (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup))
		return true;

	return false;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Propagate the changes of the sched_entity across the tg tree to make it
 * visible to the root
 */
static void propagate_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	if (cfs_rq_throttled(cfs_rq))
		return;

	if (!throttled_hierarchy(cfs_rq))
		list_add_leaf_cfs_rq(cfs_rq);

	/* Start to propagate at parent */
	se = se->parent;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);

		update_load_avg(cfs_rq, se, UPDATE_TG);

		if (cfs_rq_throttled(cfs_rq))
			break;

		if (!throttled_hierarchy(cfs_rq))
			list_add_leaf_cfs_rq(cfs_rq);
	}
}
#else
static void propagate_entity_cfs_rq(struct sched_entity *se) { }
#endif
static void detach_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

#ifdef CONFIG_SMP
	/*
	 * In case the task sched_avg hasn't been attached:
	 * - A forked task which hasn't been woken up by wake_up_new_task().
	 * - A task which has been woken up by try_to_wake_up() but is
	 *   waiting for actually being woken up by sched_ttwu_pending().
	 */
	if (!se->avg.last_update_time)
		return;
#endif

	/* Catch up with the cfs_rq and remove our load when we leave */
	update_load_avg(cfs_rq, se, 0);
	detach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq);
	propagate_entity_cfs_rq(se);
}

static void attach_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/* Synchronize entity with its cfs_rq */
	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
	attach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq);
	propagate_entity_cfs_rq(se);
}
static void detach_task_cfs_rq(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	if (!vruntime_normalized(p)) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}

	detach_entity_cfs_rq(se);
}

static void attach_task_cfs_rq(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	attach_entity_cfs_rq(se);

	if (!vruntime_normalized(p))
		se->vruntime += cfs_rq->min_vruntime;
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	detach_task_cfs_rq(p);
}

static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	attach_task_cfs_rq(p);

	if (task_on_rq_queued(p)) {
		/*
		 * We were most likely switched from sched_rt, so
		 * kick off the schedule if running, otherwise just see
		 * if we can still preempt the current task.
		 */
		if (task_current(rq, p))
			resched_curr(rq);
		else
			check_preempt_curr(rq, p, 0);
	}
}
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
{
	struct sched_entity *se = &p->se;

#ifdef CONFIG_SMP
	if (task_on_rq_queued(p)) {
		/*
		 * Move the next running task to the front of the list, so our
		 * cfs_tasks list becomes MRU one.
		 */
		list_move(&se->group_node, &rq->cfs_tasks);
	}
#endif

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT_CACHED;
	u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
#ifdef CONFIG_SMP
	raw_spin_lock_init(&cfs_rq->removed.lock);
#endif
}
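/*
 * Note on the initial min_vruntime above: (u64)(-(1LL << 20)) is roughly
 * 1ms short of u64 wrap-around, presumably so that vruntime wrap handling
 * is exercised shortly after a cfs_rq starts being used rather than only
 * after a very long uptime.
 */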
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_change_group_fair(struct task_struct *p)
{
	/*
	 * We couldn't detach or attach a forked task which
	 * hasn't been woken up by wake_up_new_task().
	 */
	if (READ_ONCE(p->__state) == TASK_NEW)
		return;

	detach_task_cfs_rq(p);

#ifdef CONFIG_SMP
	/* Tell se's cfs_rq has been changed -- migrated */
	p->se.avg.last_update_time = 0;
#endif
	set_task_rq(p, task_cpu(p));
	attach_task_cfs_rq(p);
}
void free_fair_sched_group(struct task_group *tg)
{
	int i;

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct sched_entity *se;
	struct cfs_rq *cfs_rq;
	int i;

	tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity_stats),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
		init_entity_runnable_average(se);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}
void online_fair_sched_group(struct task_group *tg)
{
	struct sched_entity *se;
	struct rq_flags rf;
	struct rq *rq;
	int i;

	for_each_possible_cpu(i) {
		rq = cpu_rq(i);
		se = tg->se[i];
		rq_lock_irq(rq, &rf);
		update_rq_clock(rq);
		attach_entity_cfs_rq(se);
		sync_throttle(tg, i);
		rq_unlock_irq(rq, &rf);
	}
}

void unregister_fair_sched_group(struct task_group *tg)
{
	unsigned long flags;
	struct rq *rq;
	int cpu;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(cpu) {
		if (tg->se[cpu])
			remove_entity_load_avg(tg->se[cpu]);

		/*
		 * Only empty task groups can be destroyed; so we can speculatively
		 * check on_list without danger of it being re-added.
		 */
		if (!tg->cfs_rq[cpu]->on_list)
			continue;

		rq = cpu_rq(cpu);

		raw_spin_rq_lock_irqsave(rq, flags);
		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
		raw_spin_rq_unlock_irqrestore(rq, flags);
	}
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* se could be NULL for root_task_group */
	if (!se)
		return;

	if (!parent) {
		se->cfs_rq = &rq->cfs;
		se->depth = 0;
	} else {
		se->cfs_rq = parent->my_q;
		se->depth = parent->depth + 1;
	}

	se->my_q = cfs_rq;
	/* guarantee group entities always have weight */
	update_load_set(&se->load, NICE_0_LOAD);
	se->parent = parent;
}
static DEFINE_MUTEX(shares_mutex);

static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;

	lockdep_assert_held(&shares_mutex);

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	if (tg->shares == shares)
		return 0;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se = tg->se[i];
		struct rq_flags rf;

		/* Propagate contribution to hierarchy */
		rq_lock_irqsave(rq, &rf);
		update_rq_clock(rq);
		for_each_sched_entity(se) {
			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
			update_cfs_group(se);
		}
		rq_unlock_irqrestore(rq, &rf);
	}

	return 0;
}

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int ret;

	mutex_lock(&shares_mutex);
	if (tg_is_idle(tg))
		ret = -EINVAL;
	else
		ret = __sched_group_set_shares(tg, shares);
	mutex_unlock(&shares_mutex);

	return ret;
}
int sched_group_set_idle(struct task_group *tg, long idle)
{
	int i;

	if (tg == &root_task_group)
		return -EINVAL;

	if (idle < 0 || idle > 1)
		return -EINVAL;

	mutex_lock(&shares_mutex);

	if (tg->idle == idle) {
		mutex_unlock(&shares_mutex);
		return 0;
	}

	tg->idle = idle;

	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se = tg->se[i];
		struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
		bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
		long idle_task_delta;
		struct rq_flags rf;

		rq_lock_irqsave(rq, &rf);

		grp_cfs_rq->idle = idle;
		if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
			goto next_cpu;

		if (se->on_rq) {
			parent_cfs_rq = cfs_rq_of(se);
			if (cfs_rq_is_idle(grp_cfs_rq))
				parent_cfs_rq->idle_nr_running++;
			else
				parent_cfs_rq->idle_nr_running--;
		}

		idle_task_delta = grp_cfs_rq->h_nr_running -
				  grp_cfs_rq->idle_h_nr_running;
		if (!cfs_rq_is_idle(grp_cfs_rq))
			idle_task_delta *= -1;

		for_each_sched_entity(se) {
			struct cfs_rq *cfs_rq = cfs_rq_of(se);

			if (!se->on_rq)
				break;

			cfs_rq->idle_h_nr_running += idle_task_delta;

			/* Already accounted at parent level and above. */
			if (cfs_rq_is_idle(cfs_rq))
				break;
		}

next_cpu:
		rq_unlock_irqrestore(rq, &rf);
	}

	/* Idle groups have minimum weight. */
	if (tg_is_idle(tg))
		__sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
	else
		__sched_group_set_shares(tg, NICE_0_LOAD);

	mutex_unlock(&shares_mutex);
	return 0;
}
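/*
 * Illustrative example with made-up numbers: a group cfs_rq with
 * h_nr_running == 5, of which idle_h_nr_running == 2, that is switched to
 * idle contributes idle_task_delta = 5 - 2 = 3, so every ancestor cfs_rq
 * accounts three more idle tasks; switching the group back to normal flips
 * the sign and removes the same three again.
 */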
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void online_fair_sched_group(struct task_group *tg) { }

void unregister_fair_sched_group(struct task_group *tg) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));

	return rr_interval;
}
/*
 * All the scheduling class methods:
 */
DEFINE_SCHED_CLASS(fair) = {

	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= __pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,
	.set_next_task		= set_next_task_fair,

#ifdef CONFIG_SMP
	.balance		= balance_fair,
	.pick_task		= pick_task_fair,
	.select_task_rq		= select_task_rq_fair,
	.migrate_task_rq	= migrate_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_dead		= task_dead_fair,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

	.update_curr		= update_curr_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_change_group	= task_change_group_fair,
#endif

#ifdef CONFIG_UCLAMP_TASK
	.uclamp_enabled		= 1,
#endif
};
12668 void print_cfs_stats(struct seq_file
*m
, int cpu
)
12670 struct cfs_rq
*cfs_rq
, *pos
;
12673 for_each_leaf_cfs_rq_safe(cpu_rq(cpu
), cfs_rq
, pos
)
12674 print_cfs_rq(m
, cpu
, cfs_rq
);
12678 #ifdef CONFIG_NUMA_BALANCING
12679 void show_numa_stats(struct task_struct
*p
, struct seq_file
*m
)
12682 unsigned long tsf
= 0, tpf
= 0, gsf
= 0, gpf
= 0;
12683 struct numa_group
*ng
;
12686 ng
= rcu_dereference(p
->numa_group
);
12687 for_each_online_node(node
) {
12688 if (p
->numa_faults
) {
12689 tsf
= p
->numa_faults
[task_faults_idx(NUMA_MEM
, node
, 0)];
12690 tpf
= p
->numa_faults
[task_faults_idx(NUMA_MEM
, node
, 1)];
12693 gsf
= ng
->faults
[task_faults_idx(NUMA_MEM
, node
, 0)],
12694 gpf
= ng
->faults
[task_faults_idx(NUMA_MEM
, node
, 1)];
12696 print_numa_stats(m
, node
, tsf
, tpf
, gsf
, gpf
);
12700 #endif /* CONFIG_NUMA_BALANCING */
12701 #endif /* CONFIG_SCHED_DEBUG */
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	int i;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));
		zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i));

#ifdef CONFIG_CFS_BANDWIDTH
		INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
		INIT_LIST_HEAD(&cpu_rq(i)->cfsb_csd_list);
#endif
	}

	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
	nohz.next_balance = jiffies;
	nohz.next_blocked = jiffies;
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);