// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>
/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time, when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta < tick_period)
		return;

	/* Reevaluate with jiffies_lock held */
	write_seqlock(&jiffies_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta >= tick_period) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta >= tick_period)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	} else {
		write_sequnlock(&jiffies_lock);
		return;
	}
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}
/*
 * Initialize and retrieve the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&jiffies_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&jiffies_lock);
	return period;
}
static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (ts->inidle)
		ts->got_idle_tick = 1;
}
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
bool tick_nohz_full_running;
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	return false;
}
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}
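/*
 * Summary of the checks above (illustrative): the tick on a full
 * dynticks CPU may only be stopped when no dependency bit is set at any
 * level, evaluated from widest to narrowest scope:
 *
 *	global (tick_dep_mask)
 *	 -> per-CPU (ts->tick_dep_mask)
 *	  -> per-task (current->tick_dep_mask)
 *	   -> per-process (current->signal->tick_dep_mask)
 *
 * A single bit set anywhere keeps the tick running, and the blocking
 * reason is reported via trace_tick_stop() with the matching
 * TICK_DEP_MASK_* value.
 */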
static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_func,
};
/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}
/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}
/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}
static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}
/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * unstable clocks.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}
/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
	}
}
void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
/*
 * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
 * per task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	/*
	 * We could optimize this with just kicking the target running the task
	 * if that noise matters for nohz full users.
	 */
	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
 * per process timers.
 */
void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}
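/*
 * Usage sketch for the dependency API above (illustrative, hypothetical
 * caller): a subsystem that arms a per-process posix CPU timer keeps the
 * tick alive for the whole thread group, and releases it again when the
 * timer is destroyed:
 *
 *	tick_nohz_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
 *	...
 *	tick_nohz_dep_clear_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
 *
 * The set side kicks the relevant CPUs so a stopped tick is restarted;
 * the clear side is a plain atomic_andnot() and takes effect at the
 * next tick re-evaluation.
 */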
/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	unsigned long flags;
	struct tick_sched *ts;

	local_irq_save(flags);

	if (!tick_nohz_full_cpu(smp_processor_id()))
		goto out;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
out:
	local_irq_restore(flags);
}
/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}
static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}
void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid circular dependency on the tick
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif /* CONFIG_NO_HZ_FULL */
/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled ?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);
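/*
 * Example (illustrative): booting with "nohz=off" on the kernel command
 * line sets tick_nohz_enabled to false and keeps the periodic tick;
 * "nohz=on" (the default) allows switching into dyntick mode.
 */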
bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}
/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU, which has the update task assigned, is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}
/*
 * Updates the per-CPU time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}
static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
}
/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
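/*
 * Usage sketch (illustrative, hypothetical caller such as a cpufreq
 * governor sampling per-CPU idle residency):
 *
 *	u64 wall, idle_us;
 *
 *	idle_us = get_cpu_idle_time_us(cpu, &wall);
 *	if (idle_us == (u64)-1)
 *		;	// NOHZ disabled, caller must sample instead
 *
 * Passing a non-NULL @last_update_time flushes the counters up to "now",
 * so two consecutive calls can be diffed to get idle time per interval.
 */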
/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, tick_period);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
	else
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&jiffies_lock);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqretry(&jiffies_lock, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick, when RCU, architecture or irq_work
	 * requests it.
	 * Aside from that, check whether the local timer softirq is
	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tmr = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tmr;
		/* Take the next rcu event into account */
		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}
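/*
 * Worked example of the decision above (illustrative, assuming HZ=1000):
 * with basemono = T and the next timer wheel event 250 ms away,
 * next_tick = T + 250000000. Since delta (250 ms) > TICK_NSEC (1 ms),
 * the tick is worth stopping. If this CPU still owns (or last held) the
 * do_timer() duty, delta is first clamped to timekeeping_max_deferment(),
 * so ts->timer_expires ends up as min(T + clamp, next_tick); otherwise
 * the CPU may sleep the full 250 ms.
 */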
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogram of event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick() can be called several times before
	 * nohz_restart_sched_tick() is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick().
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}
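/*
 * Note (illustrative summary): expires == KTIME_MAX means "no timer
 * pending at all", so the tick is left unarmed (highres: the sched_timer
 * is cancelled; lowres: the one-shot event is simply not reprogrammed)
 * until an interrupt or a newly enqueued timer re-evaluates the tick.
 */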
static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}
static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ: local_softirq_pending %02x\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;
		/*
		 * Boot safety: make sure the timekeeping duty has been
		 * assigned before entering dyntick-idle mode,
		 * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
		 */
		if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}
static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
	ktime_t expires;
	int cpu = smp_processor_id();

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}
/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}
/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}
/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}
/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}
/**
 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
 * or the tick, whichever expires first. Note that, if the tick has been
 * stopped, it returns the next hrtimer.
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_next_hrtimer(void)
{
	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}
/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}
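/*
 * Usage sketch (illustrative, in the style of a cpuidle governor):
 *
 *	ktime_t delta_next, dur;
 *
 *	dur = tick_nohz_get_sleep_length(&delta_next);
 *	// dur: expected sleep length if the tick can be stopped
 *	// delta_next: bound to use if the tick must keep running
 *
 * A governor can pick a deep idle state only when "dur" covers that
 * state's target residency, and falls back to "delta_next" otherwise.
 */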
/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}
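/*
 * Usage sketch (illustrative): a governor can compare this counter
 * between two evaluations; if it has not moved, the CPU never tried to
 * stop the tick in between, i.e. it was busy the whole time:
 *
 *	unsigned long calls = tick_nohz_get_idle_calls_cpu(cpu);
 *	bool busy = (calls == saved_calls);	// hypothetical saved state
 *	saved_calls = calls;
 */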
static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unsigned long ticks;

	if (vtime_accounting_cpu_enabled())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it does only a 1 tick accounting.
	 * Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}
static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
{
	tick_nohz_restart_sched_tick(ts, now);
	tick_nohz_account_idle_ticks(ts);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped)
		__tick_nohz_idle_restart_tick(ts, ktime_get());
}
/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		__tick_nohz_idle_restart_tick(ts, now);

	local_irq_enable();
}
/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	/* No need to reprogram if we are running tickless */
	if (unlikely(ts->tick_stopped))
		return;

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}
static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}
/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */
/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}
/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);
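/*
 * Example (illustrative): booting with "skew_tick=1" enables the offset
 * computed in tick_setup_sched_timer() below, which staggers the per-CPU
 * tick timers instead of letting them all fire at the same instant.
 */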
/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */
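/*
 * Worked example of the skew offset above (illustrative, assuming
 * HZ=250, i.e. tick_period = 4 ms, and 4 possible CPUs):
 *
 *	offset = 4000000 >> 1;			// 2 ms
 *	do_div(offset, 4);			// 500 us per CPU slot
 *	offset *= smp_processor_id();		// CPU3 -> 1.5 ms
 *
 * so the CPUs contend for jiffies_lock 500 us apart instead of colliding
 * on every tick edge.
 */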
#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif
/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}
/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}
/*
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}