// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
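
/*
 * Example usage (illustrative sketch, not part of this file): a typical
 * caller initializes a struct irq_work once and then queues it from NMI
 * or hardirq context. The names my_irq_work and my_irq_work_func are
 * hypothetical.
 */
#if 0
static void my_irq_work_func(struct irq_work *work)
{
	/* Runs in hardirq context shortly after being queued */
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static DEFINE_IRQ_WORK(my_irq_work, my_irq_work_func);

static void some_nmi_safe_path(void)
{
	/*
	 * NMI-safe: claiming is a cmpxchg() loop and queueing is a
	 * lock-less llist_add(). Returns false if already pending.
	 */
	irq_work_queue(&my_irq_work);
}
#endif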

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start from an optimistic guess at the current flags, but only
	 * trust a flag value once cmpxchg() has confirmed it.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_CLAIMED;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}
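
/*
 * Flag lifecycle (summary added for clarity): a claimed entry holds
 * IRQ_WORK_CLAIMED == (IRQ_WORK_PENDING | IRQ_WORK_BUSY). The runner
 * clears PENDING before invoking the callback, so the work can be
 * re-queued even from its own handler, and clears BUSY afterwards
 * unless someone claimed the entry again in the meantime.
 */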

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

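/*
 * Architectures with a dedicated irq_work interrupt override the weak
 * stub above with a self-IPI and advertise it through
 * arch_irq_work_has_interrupt(). Without it, the raised list is drained
 * from the timer tick via irq_work_tick() below.
 */
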
/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", defer it to the next timer tick */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI-safe */
		WARN_ON_ONCE(in_nmi());
		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
			arch_send_call_function_single_ipi(cpu);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
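
/*
 * Example (illustrative sketch, not part of this file): kick a callback
 * on a specific CPU, e.g. to drain a per-CPU buffer there. The names
 * drain_work, drain_func and kick_remote_drain are hypothetical.
 */
#if 0
static void drain_func(struct irq_work *work)
{
	/* Runs in hardirq context on the CPU it was queued on */
}

static DEFINE_IRQ_WORK(drain_work, drain_func);

static void kick_remote_drain(int target_cpu)
{
	/* Unlike irq_work_queue(), not NMI-safe for remote CPUs */
	irq_work_queue_on(&drain_work, target_cpu);
}
#endif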


bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make the update immediately visible so that other CPUs
		 * trying to claim this work don't have to wait for us to
		 * finish the callback before their data gets handled.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work: ensure that @work is no
 * longer in use once this returns.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
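
/*
 * Example (illustrative sketch, not part of this file): before freeing
 * an object that embeds a struct irq_work, wait for any in-flight
 * callback to finish. The type my_dev is hypothetical.
 */
#if 0
struct my_dev {
	struct irq_work work;
	/* ... driver state ... */
};

static void my_dev_teardown(struct my_dev *dev)
{
	/* Must be called with IRQs enabled: this can busy-wait */
	irq_work_sync(&dev->work);
	kfree(dev);
}
#endif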