From d303e9e98fce56cdb3c6f2ac92f626fc2bd51c77 Mon Sep 17 00:00:00 2001
From: Tony Luck <tony.luck@intel.com>
Date: Wed, 20 Mar 2013 10:30:15 -0700
Subject: Fix initialization of CMCI/CMCP interrupts

From: Tony Luck <tony.luck@intel.com>

commit d303e9e98fce56cdb3c6f2ac92f626fc2bd51c77 upstream.

Back in 2010, during a revamp of the irq code, some initializations
were moved from ia64_mca_init() to ia64_mca_late_init() in

	commit c75f2aa13f5b268aba369b5dc566088b5194377c
	Cannot use register_percpu_irq() from ia64_mca_init()

But this was hideously wrong. First of all, these initializations
are now done far too late: after all the other cpus have been
brought up and have initialized their own CMC vectors from
smp_callin(). Also, ia64_mca_late_init() may be called from any
cpu, so the line:
	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */
is generally not executed on the BSP, and so the CMC vector isn't
set up at all on that processor.

Make use of the arch_early_irq_init() hook to get this code executed
at just the right moment: not too early, not too late.

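The hook relied on here follows the kernel's weak-default pattern: generic
irq code provides a weak no-op arch_early_irq_init(), and an architecture
may link in a strong override. A minimal standalone sketch of that linkage
(plain userspace C, GCC/Clang weak symbols on ELF; illustrative only, not
kernel code):

/*
 * Standalone sketch, not kernel code: generic code ships a weak no-op
 * default, and an arch that needs early irq setup supplies a strong
 * definition, which the linker prefers.
 */
#include <stdio.h>

/* Weak default, standing in for the generic kernel stub. */
__attribute__((weak)) int arch_early_irq_init(void)
{
	puts("weak default: no arch-specific early irq setup");
	return 0;
}

/* Stand-in for the core call site. If another object file defines a
 * strong arch_early_irq_init() -- as the ia64 hunk below does in the
 * kernel -- the linker resolves the call to that definition instead. */
static int early_irq_init(void)
{
	return arch_early_irq_init();
}

int main(void)
{
	return early_irq_init();
}

Compiled on its own, the weak default runs; linking in a second object
with a non-weak arch_early_irq_init() silently replaces it, which is how
the override below takes effect.
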
Reported-by: Fred Hartnett <fred.hartnett@hp.com>
Tested-by: Fred Hartnett <fred.hartnett@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/ia64/include/asm/mca.h |    1 +
 arch/ia64/kernel/irq.c      |    8 ++++++++
 arch/ia64/kernel/mca.c      |   37 ++++++++++++++++++++++++-------------
 3 files changed, 33 insertions(+), 13 deletions(-)

--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CP
 extern int cpe_vector;
 extern int ia64_cpe_irq;
 extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 
+#include <asm/mca.h>
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct
 
 #endif /* CONFIG_SMP */
 
+int __init arch_early_irq_init(void)
+{
+	ia64_mca_irq_init();
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 unsigned int vectors_in_migration[NR_IRQS];
 
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
 	printk(KERN_INFO "MCA related initialization done\n");
 }
 
+
 /*
- * ia64_mca_late_init
- *
- * Opportunity to setup things that require initialization later
- * than ia64_mca_init. Setup a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs : None
- * Outputs : Status
+ * These pieces cannot be done in ia64_mca_init() because it is called before
+ * early_irq_init() which would wipe out our percpu irq registrations. But we
+ * cannot leave them until ia64_mca_late_init() because by then all the other
+ * processors have been brought online and have set their own CMC vectors to
+ * point at a non-existant action. Called from arch_early_irq_init().
  */
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
 {
-	if (!mca_init)
-		return 0;
-
 	/*
 	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
 	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
 	/* Setup the CPEI/P handler */
 	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 #endif
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ * Opportunity to setup things that require initialization later
+ * than ia64_mca_init. Setup a timer to poll for CPEs if the
+ * platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs : None
+ * Outputs : Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+	if (!mca_init)
+		return 0;
 
 	register_hotcpu_notifier(&mca_cpu_notifier);
 
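The comment added to mca.c above pins down the window this change targets.
To make the "not too early, not too late" ordering concrete, a toy mock
(hypothetical names, plain C, deliberately simplified; not the kernel's
real boot sequence):

/*
 * Hypothetical mock of the relevant boot ordering; every function here
 * is an invented stand-in, not the kernel's real code.
 */
#include <stdio.h>

static void mock_ia64_mca_init(void)
{
	/* Too early: early_irq_init() has not run yet, so percpu irq
	 * registrations made here would be wiped out. */
	puts("ia64_mca_init()");
}

static void mock_arch_early_irq_init(void)
{
	/* Just right: irq descriptors exist, APs are not up yet. */
	puts("arch_early_irq_init() -> ia64_mca_irq_init()");
}

static void mock_smp_callin(void)
{
	/* Each AP points its CMC vector at whatever has (or has not)
	 * been registered by this point. */
	puts("smp_callin() on each AP");
}

static void mock_ia64_mca_late_init(void)
{
	/* Too late: the APs above have already set their vectors. */
	puts("ia64_mca_late_init()");
}

int main(void)
{
	mock_ia64_mca_init();
	mock_arch_early_irq_init();
	mock_smp_callin();
	mock_ia64_mca_late_init();
	return 0;
}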