From 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 7 Mar 2013 14:53:45 +0100
Subject: genirq: Sanitize spurious interrupt detection of threaded irqs

From: Thomas Gleixner <tglx@linutronix.de>

commit 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c upstream.

Till reported that the spurious interrupt detection of threaded
interrupts is broken in two ways:

- note_interrupt() is called for each action thread of a shared
  interrupt line. That's wrong, as we are only interested in whether
  none of the device drivers felt responsible for the interrupt; but
  by calling it multiple times for a single interrupt line we account
  IRQ_NONE even if one of the drivers felt responsible.

- note_interrupt(), when called from the thread handler, is not
  serialized. That leaves the members of irq_desc which are used for
  the spurious detection unprotected.

To solve this we need to defer the spurious detection of a threaded
interrupt to the next hardware interrupt context, where we have
implicit serialization.

If note_interrupt() is called with action_ret == IRQ_WAKE_THREAD, we
check whether the previous interrupt requested a deferred check. If
not, we request a deferred check for the next hardware interrupt and
return.

If a deferred check was requested, we check whether one of the
interrupt threads signaled success. Depending on this information we
feed the result into the spurious detector.

If one primary handler of a shared interrupt returns IRQ_HANDLED, we
disable the deferred check of irq threads on the same line, as we have
found at least one device driver that cared.

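In condensed form, stripped of the explanatory comments that the actual
kernel/irq/spurious.c hunk below carries, the new logic in note_interrupt()
is roughly:

	if (action_ret & IRQ_WAKE_THREAD) {
		if (action_ret == IRQ_WAKE_THREAD) {
			/* Only threads were woken; no primary handler cared. */
			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
				/* First occurrence: arm the deferred check. */
				desc->threads_handled_last |= SPURIOUS_DEFERRED;
				return;
			}
			/* Deferred check armed: did any thread report IRQ_HANDLED? */
			handled = atomic_read(&desc->threads_handled) | SPURIOUS_DEFERRED;
			if (handled != desc->threads_handled_last) {
				desc->threads_handled_last = handled;
				action_ret = IRQ_HANDLED;	/* feed success into the detector */
			} else {
				action_ret = IRQ_NONE;		/* account an unhandled interrupt */
			}
		} else {
			/* A primary handler returned IRQ_HANDLED: no deferral needed. */
			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
		}
	}
	/* action_ret now drives the existing unhandled/spurious accounting. */
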
Reported-by: Till Straumann <strauman@slac.stanford.edu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Austin Schuh <austin@peloton-tech.com>
Cc: Oliver Hartkopp <socketcan@hartkopp.net>
Cc: Wolfgang Grandegger <wg@grandegger.com>
Cc: Pavel Pisa <pisa@cmp.felk.cvut.cz>
Cc: Marc Kleine-Budde <mkl@pengutronix.de>
Cc: linux-can@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1303071450130.22263@ionos
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 include/linux/irqdesc.h |    4 +
 kernel/irq/manage.c     |    4 -
 kernel/irq/spurious.c   |  106 ++++++++++++++++++++++++++++++++++++++++++++++--
 3 files changed, 108 insertions(+), 6 deletions(-)

--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -27,6 +27,8 @@ struct module;
  * @irq_count:		stats field to detect stalled irqs
  * @last_unhandled:	aging timer for unhandled count
  * @irqs_unhandled:	stats field for spurious unhandled interrupts
+ * @threads_handled:	stats field for deferred spurious detection of threaded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
  * @lock:		locking for SMP
  * @affinity_hint:	hint to user space for preferred irq affinity
  * @affinity_notify:	context for notification of affinity changes
@@ -52,6 +54,8 @@ struct irq_desc {
 	unsigned int		irq_count;	/* For detecting broken IRQs */
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int		irqs_unhandled;
+	atomic_t		threads_handled;
+	int			threads_handled_last;
 	raw_spinlock_t		lock;
 	struct cpumask		*percpu_enabled;
 #ifdef CONFIG_SMP
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -809,8 +809,8 @@ static int irq_thread(void *data)
 		irq_thread_check_affinity(desc, action);
 
 		action_ret = handler_fn(desc, action);
-		if (!noirqdebug)
-			note_interrupt(action->irq, desc, action_ret);
+		if (action_ret == IRQ_HANDLED)
+			atomic_inc(&desc->threads_handled);
 
 		wake_threads_waitq(desc);
 	}
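
The interrupt thread no longer reports to note_interrupt() itself; it only
bumps desc->threads_handled when its handler returned IRQ_HANDLED, and the
next hard interrupt evaluates that. For reference, the return values being
counted here originate from a driver's handler pair registered with
request_threaded_irq(); a minimal sketch (the foo_* names and helpers are
hypothetical, not taken from this patch) could look like:

	/* Primary handler: runs in hard interrupt context on the (shared) line. */
	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		struct foo_device *foo = dev_id;

		if (!foo_irq_pending(foo))	/* hypothetical register check */
			return IRQ_NONE;	/* not our device on this shared line */

		return IRQ_WAKE_THREAD;		/* defer the real work to the thread */
	}

	/* Threaded handler: its IRQ_HANDLED return is what gets counted above. */
	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
	{
		struct foo_device *foo = dev_id;

		return foo_process_events(foo) ? IRQ_HANDLED : IRQ_NONE;
	}

	/* Typically done in the driver's probe path. */
	int err = request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
				       IRQF_SHARED, "foo", foo);
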
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -265,21 +265,119 @@ try_misrouted_irq(unsigned int irq, stru
 	return action && (action->flags & IRQF_IRQPOLL);
 }
 
+#define SPURIOUS_DEFERRED	0x80000000
+
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		    irqreturn_t action_ret)
 {
 	if (desc->istate & IRQS_POLL_INPROGRESS)
 		return;
 
-	/* we get here again via the threaded handler */
-	if (action_ret == IRQ_WAKE_THREAD)
-		return;
-
 	if (bad_action_ret(action_ret)) {
 		report_bad_irq(irq, desc, action_ret);
 		return;
 	}
 
+	/*
+	 * We cannot call note_interrupt from the threaded handler
+	 * because we need to look at the compound of all handlers
+	 * (primary and threaded). Aside from that, in the threaded
+	 * shared case we have no serialization against an incoming
+	 * hardware interrupt while we are dealing with a threaded
+	 * result.
+	 *
+	 * So in case a thread is woken, we just note the fact and
+	 * defer the analysis to the next hardware interrupt.
+	 *
+	 * The threaded handlers store whether they successfully
+	 * handled an interrupt and we check whether that number
+	 * changed versus the last invocation.
+	 *
+	 * We could handle all interrupts with the delayed-by-one
+	 * mechanism, but for the non-forced-threaded case we'd just
+	 * add pointless overhead to the straight hardirq interrupts
+	 * for the sake of a few lines less code.
+	 */
+	if (action_ret & IRQ_WAKE_THREAD) {
+		/*
+		 * There is a thread woken. Check whether one of the
+		 * shared primary handlers returned IRQ_HANDLED. If
+		 * not we defer the spurious detection to the next
+		 * interrupt.
+		 */
+		if (action_ret == IRQ_WAKE_THREAD) {
+			int handled;
+			/*
+			 * We use bit 31 of threads_handled_last to
+			 * denote that the deferred spurious detection
+			 * is active. No locking necessary, as
+			 * threads_handled_last is only accessed here
+			 * and we have the guarantee that hard
+			 * interrupts are not reentrant.
+			 */
+			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
+				desc->threads_handled_last |= SPURIOUS_DEFERRED;
+				return;
+			}
+			/*
+			 * Check whether one of the threaded handlers
+			 * returned IRQ_HANDLED since the last
+			 * interrupt happened.
+			 *
+			 * For simplicity we just set bit 31, as it is
+			 * set in threads_handled_last as well. So we
+			 * avoid extra masking. And we really do not
+			 * care about the high bits of the handled
+			 * count. We just care about the count being
+			 * different than the one we saw before.
+			 */
+			handled = atomic_read(&desc->threads_handled);
+			handled |= SPURIOUS_DEFERRED;
+			if (handled != desc->threads_handled_last) {
+				action_ret = IRQ_HANDLED;
+				/*
+				 * Note: We keep the SPURIOUS_DEFERRED
+				 * bit set. We are handling the
+				 * previous invocation right now.
+				 * Keep it for the current one, so the
+				 * next hardware interrupt will
+				 * account for it.
+				 */
+				desc->threads_handled_last = handled;
+			} else {
+				/*
+				 * None of the threaded handlers felt
+				 * responsible for the last interrupt.
+				 *
+				 * We keep the SPURIOUS_DEFERRED bit
+				 * set in threads_handled_last as we
+				 * need to account for the current
+				 * interrupt as well.
+				 */
+				action_ret = IRQ_NONE;
+			}
+		} else {
+			/*
+			 * One of the primary handlers returned
+			 * IRQ_HANDLED. So we don't care about the
+			 * threaded handlers on the same line. Clear
+			 * the deferred detection bit.
+			 *
+			 * In theory we could/should check whether the
+			 * deferred bit is set and take the result of
+			 * the previous run into account here as
+			 * well. But it's really not worth the
+			 * trouble. If every other interrupt is
+			 * handled we never trigger the spurious
+			 * detector. And if this is just the one out
+			 * of 100k unhandled ones which is handled
+			 * then we merely delay the spurious detection
+			 * by one hard interrupt. Not a real problem.
+			 */
+			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
+		}
+	}
+
 	if (unlikely(action_ret == IRQ_NONE)) {
 		/*
 		 * If we are seeing only the odd spurious IRQ caused by