// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API, aka timer wheel,
 *  hrtimers provide finer resolution and accuracy depending on system
 *  configuration and capabilities.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Based on the original timer wheel code
 *
 *  Help, testing, suggestions, bugfixes, improvements were
 *  provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"

/*
 * Masks for selecting the soft and hard context timers from
 * clock_base[].
 */
#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
		{
			.index = HRTIMER_BASE_MONOTONIC_SOFT,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME_SOFT,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME_SOFT,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI_SOFT,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	/* Make sure we catch unsupported clockids */
	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,

	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};
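
/*
 * Illustrative lookup sketch (not from this file): the table maps a
 * clockid to its hard clock base; every other valid clockid decays to
 * the "unsupported" sentinel, which hrtimer_clockid_to_base() later
 * turns into a warning plus a MONOTONIC fallback:
 *
 *	hrtimer_clock_to_base_table[CLOCK_REALTIME]
 *		== HRTIMER_BASE_REALTIME;
 *	hrtimer_clock_to_base_table[CLOCK_PROCESS_CPUTIME_ID]
 *		== HRTIMER_MAX_CLOCK_BASES;	// unsupported for hrtimers
 */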

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 * such that hrtimer_callback_running() can unconditionally dereference
 * timer->base->cpu_base
 */
static struct hrtimer_cpu_base migration_cpu_base = {
	.clock_base = { {
		.cpu_base = &migration_cpu_base,
		.seq      = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
						     &migration_cpu_base.lock),
	}, },
};

#define migration_base	migration_cpu_base.clock_base[0]

static inline bool is_migration_base(struct hrtimer_clock_base *base)
{
	return base == &migration_base;
}

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = &migration_base and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
	__acquires(&timer->base->lock)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = READ_ONCE(timer->base);
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * We do not migrate the timer when it is expiring before the next
 * event on the target cpu. When high resolution is enabled, we cannot
 * reprogram the target cpu hardware and we would cause it to fire
 * late. To keep it simple, we handle the high resolution enabled and
 * disabled case the same way.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
	ktime_t expires;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires < new_base->cpu_base->expires_next;
}

static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) && !pinned)
		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
#endif
	return base;
}

/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *	- NO_HZ_COMMON is enabled
 *	- timer migration is enabled
 *	- the timer callback is not running
 *	- the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		WRITE_ONCE(timer->base, &migration_base);
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
			WRITE_ONCE(timer->base, base);
			goto again;
		}
		WRITE_ONCE(timer->base, new_base);
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline bool is_migration_base(struct hrtimer_clock_base *base)
{
	return false;
}

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
	__acquires(&timer->base->cpu_base->lock)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
	s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		div >>= 1;
		sft++;
	}
	tmp >>= sft;
	do_div(tmp, (u32) div);
	return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG >= 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);
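
/*
 * Illustrative example (not from this file): adding one second to the
 * largest representable expiry saturates instead of wrapping negative:
 *
 *	ktime_t t = ktime_add_safe(ktime_set(KTIME_SEC_MAX, 0),
 *				   ktime_set(1, 0));
 *	// t == ktime_set(KTIME_SEC_MAX, 0)
 */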

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static const struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		fallthrough;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer,
					  enum hrtimer_mode mode)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
				   clockid_t clock_id, enum hrtimer_mode mode);

void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
				   clockid_t clock_id, enum hrtimer_mode mode)
{
	debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
	__hrtimer_init_sleeper(sl, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);

#else

static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer,
					  enum hrtimer_mode mode) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }

#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer,
				  enum hrtimer_mode mode)
{
	debug_hrtimer_activate(timer, mode);
	trace_hrtimer_start(timer, mode);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

static struct hrtimer_clock_base *
__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
{
	unsigned int idx;

	if (!*active)
		return NULL;

	idx = __ffs(*active);
	*active &= ~(1U << idx);

	return &cpu_base->clock_base[idx];
}

#define for_each_active_base(base, cpu_base, active)	\
	while ((base = __next_base((cpu_base), &(active))))
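
/*
 * Usage sketch (illustrative; inspect() is a made-up helper): the macro
 * consumes one set bit of @active per iteration and stops once no
 * active base is left:
 *
 *	unsigned int active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
 *	struct hrtimer_clock_base *base;
 *
 *	for_each_active_base(base, cpu_base, active)
 *		inspect(base);
 */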

static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
					 const struct hrtimer *exclude,
					 unsigned int active,
					 ktime_t expires_next)
{
	struct hrtimer_clock_base *base;
	ktime_t expires;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *next;
		struct hrtimer *timer;

		next = timerqueue_getnext(&base->active);
		timer = container_of(next, struct hrtimer, node);
		if (timer == exclude) {
			/* Get to the next timer in the queue. */
			next = timerqueue_iterate_next(next);
			if (!next)
				continue;

			timer = container_of(next, struct hrtimer, node);
		}
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires < expires_next) {
			expires_next = expires;

			/* Skip cpu_base update if a timer is being excluded. */
			if (exclude)
				continue;

			if (timer->is_soft)
				cpu_base->softirq_next_timer = timer;
			else
				cpu_base->next_timer = timer;
		}
	}
	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
	if (expires_next < 0)
		expires_next = 0;
	return expires_next;
}

/*
 * Recomputes cpu_base::*next_timer and returns the earliest expires_next
 * but does not set cpu_base::*expires_next, that is done by
 * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
 * cpu_base::*expires_next right away, reprogramming logic would no longer
 * work.
 *
 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
 * those timers will get run whenever the softirq gets handled, at the end of
 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
 *
 * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
 * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
 * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
 *
 * @active_mask must be one of:
 *  - HRTIMER_ACTIVE_ALL,
 *  - HRTIMER_ACTIVE_SOFT, or
 *  - HRTIMER_ACTIVE_HARD.
 */
static ktime_t
__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
{
	unsigned int active;
	struct hrtimer *next_timer = NULL;
	ktime_t expires_next = KTIME_MAX;

	if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
		active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
		cpu_base->softirq_next_timer = NULL;
		expires_next = __hrtimer_next_event_base(cpu_base, NULL,
							 active, KTIME_MAX);

		next_timer = cpu_base->softirq_next_timer;
	}

	if (active_mask & HRTIMER_ACTIVE_HARD) {
		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
		cpu_base->next_timer = next_timer;
		expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
							 expires_next);
	}

	return expires_next;
}

static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
{
	ktime_t expires_next, soft = KTIME_MAX;

	/*
	 * If the soft interrupt has already been activated, ignore the
	 * soft bases. They will be handled in the already raised soft
	 * interrupt.
	 */
	if (!cpu_base->softirq_activated) {
		soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
		/*
		 * Update the soft expiry time. clock_settime() might have
		 * affected it.
		 */
		cpu_base->softirq_expires_next = soft;
	}

	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
	/*
	 * If a softirq timer is expiring first, update cpu_base->next_timer
	 * and program the hardware with the soft expiry time.
	 */
	if (expires_next > soft) {
		cpu_base->next_timer = cpu_base->softirq_next_timer;
		expires_next = soft;
	}

	return expires_next;
}

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
						   offs_real, offs_boot, offs_tai);

	base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
	base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
	base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;

	return now;
}

/*
 * Is the high resolution mode active ?
 */
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
		cpu_base->hres_active : 0;
}

static inline int hrtimer_hres_active(void)
{
	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}

static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
				struct hrtimer *next_timer,
				ktime_t expires_next)
{
	cpu_base->expires_next = expires_next;

	/*
	 * If hres is not active, hardware does not have to be
	 * reprogrammed yet.
	 *
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
		return;

	tick_program_event(expires_next, 1);
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	ktime_t expires_next;

	expires_next = hrtimer_update_next_event(cpu_base);

	if (skip_equal && expires_next == cpu_base->expires_next)
		return;

	__hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static bool hrtimer_hres_enabled __read_mostly  = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

static void retrigger_next_event(void *arg);

/*
 * Switch to high resolution mode
 */
static void hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (tick_init_highres()) {
		pr_warn("Could not switch to high resolution mode on CPU %u\n",
			base->cpu);
		return;
	}
	base->hres_active = 1;
	hrtimer_resolution = HIGH_RES_NSEC;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
}

#else

static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * Retrigger next event is called after clock was set with interrupts
 * disabled through an SMP function call or directly from low level
 * resume code.
 *
 * This is only invoked when:
 *	- CONFIG_HIGH_RES_TIMERS is enabled.
 *	- CONFIG_NOHZ_COMMON is enabled
 *
 * For the other cases this function is empty and because the call sites
 * are optimized out it vanishes as well, i.e. no need for lots of
 * ifdeffery.
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	/*
	 * When high resolution mode or nohz is active, then the offsets of
	 * CLOCK_REALTIME/TAI/BOOTTIME have to be updated. Otherwise the
	 * next tick will take care of that.
	 *
	 * If high resolution mode is active then the next expiring timer
	 * must be reevaluated and the clock event device reprogrammed if
	 * necessary.
	 *
	 * In the NOHZ case the update of the offset and the reevaluation
	 * of the next expiring timer is enough. The return from the SMP
	 * function call will take care of the reprogramming in case the
	 * CPU was in a NOHZ idle sleep.
	 */
	if (!__hrtimer_hres_active(base) && !tick_nohz_active)
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	if (__hrtimer_hres_active(base))
		hrtimer_force_reprogram(base, 0);
	else
		hrtimer_update_next_event(base);
	raw_spin_unlock(&base->lock);
}

/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	struct hrtimer_clock_base *base = timer->base;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires < 0)
		expires = 0;

	if (timer->is_soft) {
		/*
		 * soft hrtimer could be started on a remote CPU. In this
		 * case softirq_expires_next needs to be updated on the
		 * remote CPU. The soft hrtimer will not expire before the
		 * first hard hrtimer on the remote CPU -
		 * hrtimer_check_target() prevents this case.
		 */
		struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;

		if (timer_cpu_base->softirq_activated)
			return;

		if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
			return;

		timer_cpu_base->softirq_next_timer = timer;
		timer_cpu_base->softirq_expires_next = expires;

		if (!ktime_before(expires, timer_cpu_base->expires_next) ||
		    !reprogram)
			return;
	}

	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other cpus clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	if (expires >= cpu_base->expires_next)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will reevaluate the
	 * clock bases and reprogram the clock event device.
	 */
	if (cpu_base->in_hrtirq)
		return;

	cpu_base->next_timer = timer;

	__hrtimer_reprogram(cpu_base, timer, expires);
}

static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base,
			     unsigned int active)
{
	struct hrtimer_clock_base *base;
	unsigned int seq;
	ktime_t expires;

	/*
	 * Update the base offsets unconditionally so the following
	 * checks whether the SMP function call is required works.
	 *
	 * The update is safe even when the remote CPU is in the hrtimer
	 * interrupt or the hrtimer soft interrupt and expiring affected
	 * bases. Either it will see the update before handling a base or
	 * it will see it when it finishes the processing and reevaluates
	 * the next expiring timer.
	 */
	seq = cpu_base->clock_was_set_seq;
	hrtimer_update_base(cpu_base);

	/*
	 * If the sequence did not change over the update then the
	 * remote CPU already handled it.
	 */
	if (seq == cpu_base->clock_was_set_seq)
		return false;

	/*
	 * If the remote CPU is currently handling an hrtimer interrupt, it
	 * will reevaluate the first expiring timer of all clock bases
	 * before reprogramming. Nothing to do here.
	 */
	if (cpu_base->in_hrtirq)
		return false;

	/*
	 * Walk the affected clock bases and check whether the first expiring
	 * timer in a clock base is moving ahead of the first expiring timer of
	 * @cpu_base. If so, the IPI must be invoked because per CPU clock
	 * event devices cannot be remotely reprogrammed.
	 */
	active &= cpu_base->active_bases;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *next;

		next = timerqueue_getnext(&base->active);
		expires = ktime_sub(next->expires, base->offset);
		if (expires < cpu_base->expires_next)
			return true;

		/* Extra check for softirq clock bases */
		if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT)
			continue;
		if (cpu_base->softirq_activated)
			continue;
		if (expires < cpu_base->softirq_expires_next)
			return true;
	}
	return false;
}

/*
 * Clock was set. This might affect CLOCK_REALTIME, CLOCK_TAI and
 * CLOCK_BOOTTIME (for late sleep time injection).
 *
 * This requires updating the offsets for these clocks
 * vs. CLOCK_MONOTONIC. When high resolution timers are enabled, then this
 * also requires eventually reprogramming the per CPU clock event devices
 * when the change moves an affected timer ahead of the first expiring
 * timer on that CPU. Obviously remote per CPU clock event devices cannot
 * be reprogrammed. The other reason why an IPI has to be sent is when the
 * system is in !HIGH_RES and NOHZ mode. The NOHZ mode updates the offsets
 * in the tick, which obviously might be stopped, so this has to bring out
 * the remote CPU which might sleep in idle to get this sorted.
 */
void clock_was_set(unsigned int bases)
{
	struct hrtimer_cpu_base *cpu_base = raw_cpu_ptr(&hrtimer_bases);
	cpumask_var_t mask;
	int cpu;

	if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
		goto out_timerfd;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		on_each_cpu(retrigger_next_event, NULL, 1);
		goto out_timerfd;
	}

	/* Avoid interrupting CPUs if possible */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		unsigned long flags;

		cpu_base = &per_cpu(hrtimer_bases, cpu);
		raw_spin_lock_irqsave(&cpu_base->lock, flags);

		if (update_needs_ipi(cpu_base, bases))
			cpumask_set_cpu(cpu, mask);

		raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	}

	preempt_disable();
	smp_call_function_many(mask, retrigger_next_event, NULL, 1);
	preempt_enable();
	cpus_read_unlock();
	free_cpumask_var(mask);

out_timerfd:
	timerfd_clock_was_set();
}

static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set(CLOCK_SET_WALL);
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping code to reprogram the hrtimer interrupt device
 * on all cpus and to notify timerfd.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}

/*
 * Called during resume either directly via timekeeping_resume() or, in
 * the case of s2idle, from tick_unfreeze() to ensure that the hrtimers
 * are up to date.
 */
void hrtimers_resume_local(void)
{
	lockdep_assert_irqs_disabled();
	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
	__releases(&timer->base->cpu_base->lock)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;

	if (interval < hrtimer_resolution)
		interval = hrtimer_resolution;

	if (unlikely(delta >= interval)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
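
/*
 * Usage sketch (illustrative; my_timer_fn() is a hypothetical callback):
 * a periodic timer pushes its own expiry forward and asks to be
 * requeued. hrtimer_forward_now() is the common wrapper which forwards
 * past the current time of the timer's base:
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		hrtimer_forward_now(t, ms_to_ktime(100));
 *		return HRTIMER_RESTART;
 *	}
 */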

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base,
			   enum hrtimer_mode mode)
{
	debug_activate(timer, mode);

	base->cpu_base->active_bases |= 1 << base->index;

	/* Pairs with the lockless read in hrtimer_is_queued() */
	WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);

	return timerqueue_add(&base->active, &timer->node);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     u8 newstate, int reprogram)
{
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	u8 state = timer->state;

	/* Pairs with the lockless read in hrtimer_is_queued() */
	WRITE_ONCE(timer->state, newstate);
	if (!(state & HRTIMER_STATE_ENQUEUED))
		return;

	if (!timerqueue_del(&base->active, &timer->node))
		cpu_base->active_bases &= ~(1 << base->index);

	/*
	 * Note: If reprogram is false we do not update
	 * cpu_base->next_timer. This happens when we remove the first
	 * timer on a remote cpu. No harm as we never dereference
	 * cpu_base->next_timer. So the worst thing that can happen is
	 * a superfluous call to hrtimer_force_reprogram() on the
	 * remote cpu later on if the same timer gets enqueued again.
	 */
	if (reprogram && timer == cpu_base->next_timer)
		hrtimer_force_reprogram(cpu_base, 1);
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
	       bool restart, bool keep_local)
{
	u8 state = timer->state;

	if (state & HRTIMER_STATE_ENQUEUED) {
		bool reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

		/*
		 * If the timer is not restarted then reprogramming is
		 * required if the timer is local. If it is local and about
		 * to be restarted, avoid programming it twice (on removal
		 * and a moment later when it's requeued).
		 */
		if (!restart)
			state = HRTIMER_STATE_INACTIVE;
		else
			reprogram &= !keep_local;

		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
					    const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffy) to prevent short timeouts.
	 */
	timer->is_rel = mode & HRTIMER_MODE_REL;
	if (timer->is_rel)
		tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
	return tim;
}

static void
hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
{
	ktime_t expires;

	/*
	 * Find the next SOFT expiration.
	 */
	expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);

	/*
	 * reprogramming needs to be triggered, even if the next soft
	 * hrtimer expires at the same time as the next hard
	 * hrtimer. cpu_base->softirq_expires_next needs to be updated!
	 */
	if (expires == KTIME_MAX)
		return;

	/*
	 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
	 * cpu_base->*expires_next is only set by hrtimer_reprogram()
	 */
	hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
}

static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				    u64 delta_ns, const enum hrtimer_mode mode,
				    struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;
	bool force_local, first;

	/*
	 * If the timer is on the local cpu base and is the first expiring
	 * timer then this might end up reprogramming the hardware twice
	 * (on removal and on enqueue). To avoid that, prevent the
	 * reprogram on removal: keep the timer local to the current CPU
	 * and enforce reprogramming after it is queued no matter whether
	 * it is the new first expiring timer again or not.
	 */
	force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
	force_local &= base->cpu_base->next_timer == timer;

	/*
	 * Remove an active timer from the queue. In case it is not queued
	 * on the current CPU, make sure that remove_hrtimer() updates the
	 * remote data correctly.
	 *
	 * If it's on the current CPU and the first expiring timer, then
	 * skip reprogramming, keep the timer local and enforce
	 * reprogramming later if it was the first expiring timer. This
	 * avoids programming the underlying clock event twice (once at
	 * removal and once after enqueue).
	 */
	remove_hrtimer(timer, base, true, force_local);

	if (mode & HRTIMER_MODE_REL)
		tim = ktime_add_safe(tim, base->get_time());

	tim = hrtimer_update_lowres(timer, tim, mode);

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	/* Switch the timer base, if necessary: */
	if (!force_local) {
		new_base = switch_hrtimer_base(timer, base,
					       mode & HRTIMER_MODE_PINNED);
	} else {
		new_base = base;
	}

	first = enqueue_hrtimer(timer, new_base, mode);
	if (!force_local)
		return first;

	/*
	 * Timer was forced to stay on the current CPU to avoid
	 * reprogramming on removal and enqueue. Force reprogram the
	 * hardware by evaluating the new first expiring timer.
	 */
	hrtimer_force_reprogram(new_base->cpu_base, 1);
	return 0;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *		softirq based mode is considered for debug purpose only!
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			    u64 delta_ns, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;

	/*
	 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
	 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
	 * expiry mode because unmarked timers are moved to softirq expiry.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
	else
		WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);

	base = lock_hrtimer_base(timer, &flags);

	if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
		hrtimer_reprogram(timer, true);

	unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
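
/*
 * Usage sketch (illustrative values; my_timer is a hypothetical,
 * already initialized hrtimer with a callback set): arm a relative
 * timer with 200us of slack so the core may coalesce it with
 * neighbouring expiries:
 *
 *	hrtimer_start_range_ns(&my_timer, ms_to_ktime(10),
 *			       200 * NSEC_PER_USEC, HRTIMER_MODE_REL);
 */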

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *
 *  *  0 when the timer was not active
 *  *  1 when the timer was active
 *  * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	/*
	 * Check lockless first. If the timer is not active (neither
	 * enqueued nor running the callback), nothing to do here. The
	 * base lock does not serialize against a concurrent enqueue,
	 * so we can avoid taking it.
	 */
	if (!hrtimer_active(timer))
		return 0;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base, false, false);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

#ifdef CONFIG_PREEMPT_RT
static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
{
	spin_lock_init(&base->softirq_expiry_lock);
}

static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
{
	spin_lock(&base->softirq_expiry_lock);
}

static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
{
	spin_unlock(&base->softirq_expiry_lock);
}

/*
 * The counterpart to hrtimer_cancel_wait_running().
 *
 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
 * the timer callback to finish. Drop expiry_lock and reacquire it. That
 * allows the waiter to acquire the lock and make progress.
 */
static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
				      unsigned long flags)
{
	if (atomic_read(&cpu_base->timer_waiters)) {
		raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
		spin_unlock(&cpu_base->softirq_expiry_lock);
		spin_lock(&cpu_base->softirq_expiry_lock);
		raw_spin_lock_irq(&cpu_base->lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion: if the soft irq thread is preempted
 * in the middle of a timer callback, then calling del_timer_sync() can
 * lead to two issues:
 *
 *  - If the caller is on a remote CPU then it has to spin wait for the timer
 *    handler to complete. This can result in unbound priority inversion.
 *
 *  - If the caller originates from the task which preempted the timer
 *    handler on the same CPU, then spin waiting for the timer handler to
 *    complete is never going to end.
 */
void hrtimer_cancel_wait_running(const struct hrtimer *timer)
{
	/* Lockless read. Prevent the compiler from reloading it below */
	struct hrtimer_clock_base *base = READ_ONCE(timer->base);

	/*
	 * Just relax if the timer expires in hard interrupt context or if
	 * it is currently on the migration base.
	 */
	if (!timer->is_soft || is_migration_base(base)) {
		cpu_relax();
		return;
	}

	/*
	 * Mark the base as contended and grab the expiry lock, which is
	 * held by the softirq across the timer callback. Drop the lock
	 * immediately so the softirq can expire the next timer. In theory
	 * the timer could already be running again, but that's more than
	 * unlikely and just causes another wait loop.
	 */
	atomic_inc(&base->cpu_base->timer_waiters);
	spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
	atomic_dec(&base->cpu_base->timer_waiters);
	spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
}
#else

static inline void
hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
					     unsigned long flags) { }
#endif

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	int ret;

	do {
		ret = hrtimer_try_to_cancel(timer);

		if (ret < 0)
			hrtimer_cancel_wait_running(timer);
	} while (ret < 0);
	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
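
/*
 * Usage note (illustrative; "dev" is a hypothetical structure): a common
 * teardown pattern is to cancel the timer before freeing the object that
 * embeds it, so the callback can no longer run on freed memory:
 *
 *	hrtimer_cancel(&dev->watchdog_timer);
 *	kfree(dev);
 */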

/**
 * __hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
		rem = hrtimer_expires_remaining_adjusted(timer);
	else
		rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!__hrtimer_hres_active(cpu_base))
		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}

/**
 * hrtimer_next_event_without - time until next expiry event w/o one timer
 * @exclude:	timer to exclude
 *
 * Returns the next expiry time over all timers except for the @exclude one or
 * KTIME_MAX if none of them is pending.
 */
u64 hrtimer_next_event_without(const struct hrtimer *exclude)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (__hrtimer_hres_active(cpu_base)) {
		unsigned int active;

		if (!cpu_base->softirq_activated) {
			active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
			expires = __hrtimer_next_event_base(cpu_base, exclude,
							    active, KTIME_MAX);
		}
		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
		expires = __hrtimer_next_event_base(cpu_base, exclude, active,
						    expires);
	}

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
#endif

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	if (likely(clock_id < MAX_CLOCKS)) {
		int base = hrtimer_clock_to_base_table[clock_id];

		if (likely(base != HRTIMER_MAX_CLOCK_BASES))
			return base;
	}
	WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
	return HRTIMER_BASE_MONOTONIC;
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
	struct hrtimer_cpu_base *cpu_base;
	int base;

	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context for latency reasons and because the callbacks
	 * can invoke functions which might sleep on RT, e.g. spin_lock().
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
		softtimer = true;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	/*
	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they need to become CLOCK_MONOTONIC to
	 * ensure POSIX compliance.
	 */
	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
		clock_id = CLOCK_MONOTONIC;

	base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
	base += hrtimer_clockid_to_base(clock_id);
	timer->is_soft = softtimer;
	timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	The modes which are relevant for initialization:
 *		HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 *		HRTIMER_MODE_REL_SOFT
 *
 *		The PINNED variants of the above can be handed in,
 *		but the PINNED bit is ignored as pinning happens
 *		when the hrtimer is started
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
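
/*
 * Usage sketch (illustrative; my_timer and my_timer_fn() are hypothetical):
 *
 *	static struct hrtimer my_timer;
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_timer_fn;
 *	hrtimer_start(&my_timer, ms_to_ktime(50), HRTIMER_MODE_REL);
 */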

/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned int seq;

	do {
		base = READ_ONCE(timer->base);
		seq = raw_read_seqcount_begin(&base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    base->running == timer)
			return true;

	} while (read_seqcount_retry(&base->seq, seq) ||
		 base != READ_ONCE(timer->base));

	return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);

/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:	the timer is queued
 *  - callback:	the timer is being run
 *  - post:	the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section, if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */

static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
			  struct hrtimer_clock_base *base,
			  struct hrtimer *timer, ktime_t *now,
			  unsigned long flags) __must_hold(&cpu_base->lock)
{
	enum hrtimer_restart (*fn)(struct hrtimer *);
	bool expires_in_hardirq;
	int restart;

	lockdep_assert_held(&cpu_base->lock);

	debug_deactivate(timer);
	base->running = timer;

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
	fn = timer->function;

	/*
	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
	 * timer is restarted with a period then it becomes an absolute
	 * timer. If it's not restarted it does not matter.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
		timer->is_rel = false;

	/*
	 * The timer is marked as running in the CPU base, so it is
	 * protected against migration to a different CPU even if the lock
	 * is dropped.
	 */
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	trace_hrtimer_expire_entry(timer, now);
	expires_in_hardirq = lockdep_hrtimer_enter(timer);

	restart = fn(timer);

	lockdep_hrtimer_exit(expires_in_hardirq);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock_irq(&cpu_base->lock);

	/*
	 * Note: We clear the running state after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * before we complete here.
	 */
	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running.timer == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	WARN_ON_ONCE(base->running != timer);
	base->running = NULL;
}

static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
				 unsigned long flags, unsigned int active_mask)
{
	struct hrtimer_clock_base *base;
	unsigned int active = cpu_base->active_bases & active_mask;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *node;
		ktime_t basenow;

		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow < hrtimer_get_softexpires_tv64(timer))
				break;

			__run_hrtimer(cpu_base, base, timer, &basenow, flags);
			if (active_mask == HRTIMER_ACTIVE_SOFT)
				hrtimer_sync_wait_running(cpu_base, flags);
		}
	}
}

static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
	ktime_t now;

	hrtimer_cpu_base_lock_expiry(cpu_base);
	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	now = hrtimer_update_base(cpu_base);
	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);

	cpu_base->softirq_activated = 0;
	hrtimer_update_softirq_timer(cpu_base, true);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	hrtimer_cpu_base_unlock_expiry(cpu_base);
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	unsigned long flags;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event = KTIME_MAX;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next = KTIME_MAX;

	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
		cpu_base->softirq_expires_next = KTIME_MAX;
		cpu_base->softirq_activated = 1;
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
	}

	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);

	/* Reevaluate the clock bases for the [soft] next expiry */
	expires_next = hrtimer_update_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	/* Reprogramming necessary ? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}

/* called with interrupts disabled */
static inline void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = this_cpu_ptr(&tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from run_local_timers in hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
	ktime_t now;

	if (__hrtimer_hres_active(cpu_base))
		return;

	/*
	 * This _is_ ugly: We have to check periodically, whether we
	 * can switch to highres and / or nohz mode. The clocksource
	 * switch happens with xtime_lock held. Notification from
	 * there only sets the check bit in the tick_oneshot code,
	 * otherwise we might deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
	}

	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	now = hrtimer_update_base(cpu_base);

	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
		cpu_base->softirq_expires_next = KTIME_MAX;
		cpu_base->softirq_activated = 1;
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
	}

	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

/**
 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
 * @sl:		sleeper to be started
 * @mode:	timer mode abs/rel
 *
 * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers
 * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context)
 */
void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
				   enum hrtimer_mode mode)
{
	/*
	 * Make the enqueue delivery mode check work on RT. If the sleeper
	 * was initialized for hard interrupt delivery, force the mode bit.
	 * This is a special case for hrtimer_sleepers because
	 * hrtimer_init_sleeper() determines the delivery mode on RT so the
	 * fiddling with this decision is avoided at the call sites.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
		mode |= HRTIMER_MODE_HARD;

	hrtimer_start_expires(&sl->timer, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);

static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
				   clockid_t clock_id, enum hrtimer_mode mode)
{
	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context either for latency reasons or because the
	 * hrtimer callback takes regular spinlocks or invokes other
	 * functions which are not suitable for hard interrupt context on
	 * PREEMPT_RT.
	 *
	 * The hrtimer_sleeper callback is RT compatible in hard interrupt
	 * context, but there is a latency concern: Untrusted userspace can
	 * spawn many threads which arm timers for the same expiry time on
	 * the same CPU. That causes a latency spike due to the wakeup of
	 * a gazillion threads.
	 *
	 * OTOH, privileged real-time user space applications rely on the
	 * low latency of hard interrupt wakeups. If the current task is in
	 * a real-time scheduling class, mark the mode for hard interrupt
	 * expiry.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
			mode |= HRTIMER_MODE_HARD;
	}

	__hrtimer_init(&sl->timer, clock_id, mode);
	sl->timer.function = hrtimer_wakeup;
	sl->task = current;
}

/**
 * hrtimer_init_sleeper - initialize sleeper to the given clock
 * @sl:		sleeper to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
			  enum hrtimer_mode mode)
{
	debug_init(&sl->timer, clock_id, mode);
	__hrtimer_init_sleeper(sl, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
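
/*
 * Usage sketch (illustrative): the pattern do_nanosleep() below follows,
 * reduced to its core — sleep on a stack-allocated sleeper:
 *
 *	struct hrtimer_sleeper sl;
 *
 *	hrtimer_init_sleeper_on_stack(&sl, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_set_expires(&sl.timer, ms_to_ktime(10));
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	hrtimer_sleeper_start_expires(&sl, HRTIMER_MODE_REL);
 *	if (sl.task)
 *		schedule();
 *	hrtimer_cancel(&sl.timer);
 *	destroy_hrtimer_on_stack(&sl.timer);
 */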

int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
	switch(restart->nanosleep.type) {
#ifdef CONFIG_COMPAT_32BIT_TIME
	case TT_COMPAT:
		if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
			return -EFAULT;
		break;
#endif
	case TT_NATIVE:
		if (put_timespec64(ts, restart->nanosleep.rmtp))
			return -EFAULT;
		break;
	default:
		BUG();
	}
	return -ERESTART_RESTARTBLOCK;
}
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	struct restart_block *restart;

	do {
		set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		hrtimer_sleeper_start_expires(t, mode);

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		/* Any further iteration re-arms with the absolute expiry */
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	/* The sleep completed: hrtimer_wakeup() cleared t->task */
	if (!t->task)
		return 0;

	restart = &current->restart_block;
	if (restart->nanosleep.type != TT_NONE) {
		ktime_t rem = hrtimer_expires_remaining(&t->timer);
		struct timespec64 rmt;

		if (rem <= 0)
			return 0;
		rmt = ktime_to_timespec64(rem);

		return nanosleep_copyout(restart, &rmt);
	}
	return -ERESTART_RESTARTBLOCK;
}
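/*
 * Worked example for the copyout above, assuming a relative sleep: a
 * 100ms nanosleep() interrupted by a signal after 60ms computes
 * hrtimer_expires_remaining() of roughly 40ms and copies that back to
 * the user supplied rmtp buffer, so userspace can restart the call
 * with just the leftover interval.
 */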
static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	int ret;

	hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
				      HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
	ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
	destroy_hrtimer_on_stack(&t.timer);

	return ret;
}
long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
		       const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	u64 slack;

	slack = current->timer_slack_ns;
	if (rt_task(current))
		slack = 0;

	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
	ret = do_nanosleep(&t, mode);
	if (ret != -ERESTART_RESTARTBLOCK)
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	restart = &current->restart_block;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
	set_restart_fn(restart, hrtimer_nanosleep_restart);
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}
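/*
 * Note the restart scheme above: an interrupted relative sleep is
 * converted to an absolute one. The expiry is saved in the restart
 * block, so a restart via hrtimer_nanosleep_restart() resumes toward
 * the same absolute deadline instead of re-arming the full relative
 * interval, which repeated signals could otherwise extend forever.
 */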
SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
		struct __kernel_timespec __user *, rmtp)
{
	struct timespec64 tu;

	if (get_timespec64(&tu, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&tu))
		return -EINVAL;

	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;
	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
				 CLOCK_MONOTONIC);
}
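/*
 * How userspace typically drives this syscall (illustrative sketch in
 * plain C, error handling elided):
 *
 *	struct timespec req = { .tv_sec = 0, .tv_nsec = 100000000 };
 *	struct timespec rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;	// resume with the leftover interval
 */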
#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
		struct old_timespec32 __user *, rmtp)
{
	struct timespec64 tu;

	if (get_old_timespec32(&tu, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&tu))
		return -EINVAL;

	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;
	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
				 CLOCK_MONOTONIC);
}
#endif
/*
 * Functions related to boot-time initialization:
 */
int hrtimers_prepare_cpu(unsigned int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];

		clock_b->cpu_base = cpu_base;
		seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
		timerqueue_init_head(&clock_b->active);
	}

	cpu_base->cpu = cpu;
	cpu_base->active_bases = 0;
	cpu_base->hres_active = 0;
	cpu_base->hang_detected = 0;
	cpu_base->next_timer = NULL;
	cpu_base->softirq_next_timer = NULL;
	cpu_base->expires_next = KTIME_MAX;
	cpu_base->softirq_expires_next = KTIME_MAX;
	hrtimer_cpu_base_init_expiry_lock(cpu_base);
	return 0;
}
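/*
 * hrtimers_prepare_cpu() runs for the boot CPU via hrtimers_init()
 * below and for secondary CPUs from the CPU hotplug prepare stage, so
 * every hrtimer_cpu_base starts out with empty timer queues and
 * KTIME_MAX expiry markers before any timer can be enqueued on it.
 */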
#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as ENQUEUED not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
	}
}
int hrtimers_cpu_dying(unsigned int dying_cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i, ncpu = cpumask_first(cpu_active_mask);

	tick_cancel_sched_timer(dying_cpu);

	old_base = this_cpu_ptr(&hrtimer_bases);
	new_base = &per_cpu(hrtimer_bases, ncpu);

	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	raw_spin_lock(&old_base->lock);
	raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	/*
	 * The migration might have changed the first expiring softirq
	 * timer on this CPU. Update it.
	 */
	__hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);

	/* Tell the other CPU to retrigger the next event */
	smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);

	raw_spin_unlock(&new_base->lock);
	raw_spin_unlock(&old_base->lock);

	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */
void __init hrtimers_init(void)
{
	hrtimers_prepare_cpu(smp_processor_id());
	open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}
/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t) for SCHED_OTHER tasks
 * @mode:	timer mode
 * @clock_id:	timer clock to be used
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
			       const enum hrtimer_mode mode, clockid_t clock_id)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && *expires == 0) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		return -EINTR;
	}

	/*
	 * Override any slack passed by the user if under
	 * rt constraints.
	 */
	if (rt_task(current))
		delta = 0;

	hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
	hrtimer_sleeper_start_expires(&t, mode);

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock);
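/*
 * Illustrative sketch for the clock_id parameter, assuming a relative
 * one second wait measured on CLOCK_BOOTTIME so that time spent in
 * system suspend counts toward the timeout:
 *
 *	ktime_t to = ktime_set(1, 0);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	schedule_hrtimeout_range_clock(&to, NSEC_PER_MSEC,
 *				       HRTIMER_MODE_REL, CLOCK_BOOTTIME);
 */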
/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t) for SCHED_OTHER tasks
 * @mode:	timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly
 * for regular (non RT/DL) tasks.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
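/*
 * The canonical caller pattern, an interruptible 5ms sleep
 * (illustrative sketch; the caller must set the task state first):
 *
 *	ktime_t to = ms_to_ktime(5);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (schedule_hrtimeout(&to, HRTIMER_MODE_REL) == -EINTR)
 *		;	// woken early by a signal or wake_up_process()
 */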