// SPDX-License-Identifier: GPL-2.0
/*
 * Emulate a local clock event device via a pseudo clock device.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/clockchips.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

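/* The hrtimer that backs the pseudo broadcast clock event device. */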
static struct hrtimer bctimer;

static int bc_shutdown(struct clock_event_device *evt)
{
        /*
         * Note, we cannot cancel the timer here as we might
         * run into the following live lock scenario:
         *
         * cpu 0                cpu1
         * lock(broadcast_lock);
         *                      hrtimer_interrupt()
         *                      bc_handler()
         *                         tick_handle_oneshot_broadcast();
         *                              lock(broadcast_lock);
         * hrtimer_cancel()
         *   wait_for_callback()
         */
        hrtimer_try_to_cancel(&bctimer);
        return 0;
}

/*
 * This is called from the guts of the broadcast code when the cpu
 * which is about to enter idle has the earliest broadcast timer event.
 */
static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
{
        int bc_moved;

        /*
         * We try to cancel the timer first. If the callback is in
         * flight on some other cpu then we let it handle it. If we
         * were able to cancel the timer nothing can rearm it as we
         * own broadcast_lock.
         *
         * However we can also be called from the event handler of
         * ce_broadcast_hrtimer itself when it expires. We cannot
         * restart the timer because we are in the callback, but we
         * can set the expiry time and let the callback return
         * HRTIMER_RESTART.
         *
         * Since we are in the idle loop at this point and because
         * hrtimer_{start/cancel} functions call into tracing,
         * calls to these functions must be bound within RCU_NONIDLE.
         */
        RCU_NONIDLE({
                bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
                if (bc_moved)
                        hrtimer_start(&bctimer, expires,
                                      HRTIMER_MODE_ABS_PINNED);});
        if (bc_moved) {
                /* Bind the "device" to the cpu */
                bc->bound_on = smp_processor_id();
        } else if (bc->bound_on == smp_processor_id()) {
                hrtimer_set_expires(&bctimer, expires);
        }
        return 0;
}

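/*
 * The pseudo clock event device driven by bctimer. The zero rating keeps
 * it at the bottom of the pile, so any real broadcast clock event device
 * is preferred over it. CLOCK_EVT_FEAT_KTIME means the core hands expiry
 * times to bc_set_next() as ktime_t values rather than clock ticks.
 */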
static struct clock_event_device ce_broadcast_hrtimer = {
        .name                   = "bc_hrtimer",
        .set_state_shutdown     = bc_shutdown,
        .set_next_ktime         = bc_set_next,
        .features               = CLOCK_EVT_FEAT_ONESHOT |
                                  CLOCK_EVT_FEAT_KTIME |
                                  CLOCK_EVT_FEAT_HRTIMER,
        .rating                 = 0,
        .bound_on               = -1,
        .min_delta_ns           = 1,
        .max_delta_ns           = KTIME_MAX,
        .min_delta_ticks        = 1,
        .max_delta_ticks        = ULONG_MAX,
        .mult                   = 1,
        .shift                  = 0,
        .cpumask                = cpu_possible_mask,
};

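/*
 * Expiry handler of bctimer: hand the expiry over to the broadcast
 * machinery via the registered event_handler(). If the device is still
 * in oneshot mode and a further event has been programmed (next_event !=
 * KTIME_MAX), return HRTIMER_RESTART so the timer is rearmed for the
 * expiry which bc_set_next() stored with hrtimer_set_expires() while we
 * were in this callback.
 */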
static enum hrtimer_restart bc_handler(struct hrtimer *t)
{
        ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);

        if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
                if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
                        return HRTIMER_RESTART;

        return HRTIMER_NORESTART;
}

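/*
 * Set up the hrtimer backed pseudo device and register it with the
 * clockevents core so it can be installed as the broadcast device.
 */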
void tick_setup_hrtimer_broadcast(void)
{
        hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        bctimer.function = bc_handler;
        clockevents_register_device(&ce_broadcast_hrtimer);
}