// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}
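
/*
 * Flag lifecycle, as implied by the claim/run paths in this file: claiming
 * sets both PENDING and BUSY (IRQ_WORK_CLAIMED); irq_work_single() clears
 * PENDING before invoking the callback, so the work may be claimed and
 * queued again from within its own callback; BUSY is only dropped
 * afterwards, and only if nobody re-claimed the work in the meantime.
 */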

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
		if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
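
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * hypothetical NMI or hardirq path defers a report into irq_work. It
 * assumes the IRQ_WORK_INIT() initializer from <linux/irq_work.h>; the
 * names report_func, report_work and hypothetical_nmi_handler() are made
 * up for the example.
 *
 *	static void report_func(struct irq_work *work)
 *	{
 *		pr_warn("deferred report from irq_work\n");
 *	}
 *
 *	static struct irq_work report_work = IRQ_WORK_INIT(report_func);
 *
 *	static void hypothetical_nmi_handler(void)
 *	{
 *		// NMI-safe; returns false if the work was already pending.
 *		irq_work_queue(&report_work);
 *	}
 */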

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		__smp_call_single_queue(cpu, &work->node.llist);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
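
/*
 * Note on the remote path above: irq_work_claim() also sets
 * CSD_TYPE_IRQ_WORK in the flags, which is what lets the generic SMP code
 * recognise the entry pushed via __smp_call_single_queue() on the target
 * CPU's call_single_queue and dispatch it to irq_work_single() there
 * (see flush_smp_call_function_queue() in kernel/smp.c).
 */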


bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit, after this point the @work
	 * can be re-used.
	 * Make it immediately visible so that other CPUs trying
	 * to claim that work don't rely on us to handle their data
	 * while we are in the middle of the func.
	 */
	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->node.a_flags);

	lockdep_irq_work_enter(work);
	work->func(work);
	lockdep_irq_work_exit(work);
	/*
	 * Clear the BUSY bit and return to the free state if
	 * no-one else claimed it meanwhile.
	 */
	flags &= ~IRQ_WORK_PENDING;
	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
}
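
/*
 * Callbacks dispatched by irq_work_run_list() below run in hardirq context
 * with interrupts disabled, so they must not sleep. Because PENDING is
 * cleared before the callback is invoked, a callback may re-queue its own
 * work item.
 */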

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work; ensures the work is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (irq_work_is_busy(work))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
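
/*
 * A short teardown sketch (illustrative only; my_dev and my_dev_teardown()
 * are hypothetical): before freeing an object that embeds a struct
 * irq_work, wait for a possibly in-flight callback to finish.
 *
 *	struct my_dev {
 *		struct irq_work work;
 *		// ...
 *	};
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		irq_work_sync(&dev->work);	// wait until the work is idle
 *		kfree(dev);
 *	}
 */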