]>
Commit | Line | Data |
---|---|---|
00e5a55c BS |
1 | From: Gerald Schaefer <geraldsc@de.ibm.com> |
2 | Subject: kernel: smp_call_function races. | |
3 | References: bnc#450096 | |
4 | ||
5 | Symptom: System hangs. | |
6 | Problem: The s390 specific implementation of smp_call_function() | |
7 | waits until all signalled cpus have started to execute | |
8 | the function. This can cause deadlocks. | |
9 | Solution: Replace the s390 specific implementation with the generic | |
10 | ipi infrastructure. | |
11 | ||
12 | Acked-by: John Jolly <jjolly@suse.de> | |
13 | --- | |
14 | arch/s390/Kconfig | 1 | |
15 | arch/s390/include/asm/sigp.h | 1 | |
16 | arch/s390/include/asm/smp.h | 5 - | |
17 | arch/s390/kernel/smp.c | 175 ++++--------------------------------------- | |
18 | 4 files changed, 24 insertions(+), 158 deletions(-) | |
19 | ||
20 | --- a/arch/s390/include/asm/sigp.h | |
21 | +++ b/arch/s390/include/asm/sigp.h | |
22 | @@ -61,6 +61,7 @@ typedef enum | |
23 | { | |
24 | ec_schedule=0, | |
25 | ec_call_function, | |
26 | + ec_call_function_single, | |
27 | ec_bit_last | |
28 | } ec_bit_sig; | |
29 | ||
30 | --- a/arch/s390/include/asm/smp.h | |
31 | +++ b/arch/s390/include/asm/smp.h | |
32 | @@ -91,8 +91,9 @@ extern int __cpu_up (unsigned int cpu); | |
33 | extern struct mutex smp_cpu_state_mutex; | |
34 | extern int smp_cpu_polarization[]; | |
35 | ||
36 | -extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | |
37 | - void *info, int wait); | |
38 | +extern void arch_send_call_function_single_ipi(int cpu); | |
39 | +extern void arch_send_call_function_ipi(cpumask_t mask); | |
40 | + | |
41 | #endif | |
42 | ||
43 | #ifndef CONFIG_SMP | |
44 | --- a/arch/s390/Kconfig | |
45 | +++ b/arch/s390/Kconfig | |
46 | @@ -70,6 +70,7 @@ mainmenu "Linux Kernel Configuration" | |
47 | ||
48 | config S390 | |
49 | def_bool y | |
50 | + select USE_GENERIC_SMP_HELPERS if SMP | |
51 | select HAVE_SYSCALL_WRAPPERS | |
52 | select HAVE_OPROFILE | |
53 | select HAVE_KPROBES | |
54 | --- a/arch/s390/kernel/smp.c | |
55 | +++ b/arch/s390/kernel/smp.c | |
56 | @@ -79,159 +79,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_de | |
57 | ||
58 | static void smp_ext_bitcall(int, ec_bit_sig); | |
59 | ||
60 | -/* | |
61 | - * Structure and data for __smp_call_function_map(). This is designed to | |
62 | - * minimise static memory requirements. It also looks cleaner. | |
63 | - */ | |
64 | -static DEFINE_SPINLOCK(call_lock); | |
65 | - | |
66 | -struct call_data_struct { | |
67 | - void (*func) (void *info); | |
68 | - void *info; | |
69 | - cpumask_t started; | |
70 | - cpumask_t finished; | |
71 | - int wait; | |
72 | -}; | |
73 | - | |
74 | -static struct call_data_struct *call_data; | |
75 | - | |
76 | -/* | |
77 | - * 'Call function' interrupt callback | |
78 | - */ | |
79 | -static void do_call_function(void) | |
80 | -{ | |
81 | - void (*func) (void *info) = call_data->func; | |
82 | - void *info = call_data->info; | |
83 | - int wait = call_data->wait; | |
84 | - | |
85 | - cpu_set(smp_processor_id(), call_data->started); | |
86 | - (*func)(info); | |
87 | - if (wait) | |
88 | - cpu_set(smp_processor_id(), call_data->finished);; | |
89 | -} | |
90 | - | |
91 | -static void __smp_call_function_map(void (*func) (void *info), void *info, | |
92 | - int wait, cpumask_t map) | |
93 | -{ | |
94 | - struct call_data_struct data; | |
95 | - int cpu, local = 0; | |
96 | - | |
97 | - /* | |
98 | - * Can deadlock when interrupts are disabled or if in wrong context. | |
99 | - */ | |
100 | - WARN_ON(irqs_disabled() || in_irq()); | |
101 | - | |
102 | - /* | |
103 | - * Check for local function call. We have to have the same call order | |
104 | - * as in on_each_cpu() because of machine_restart_smp(). | |
105 | - */ | |
106 | - if (cpu_isset(smp_processor_id(), map)) { | |
107 | - local = 1; | |
108 | - cpu_clear(smp_processor_id(), map); | |
109 | - } | |
110 | - | |
111 | - cpus_and(map, map, cpu_online_map); | |
112 | - if (cpus_empty(map)) | |
113 | - goto out; | |
114 | - | |
115 | - data.func = func; | |
116 | - data.info = info; | |
117 | - data.started = CPU_MASK_NONE; | |
118 | - data.wait = wait; | |
119 | - if (wait) | |
120 | - data.finished = CPU_MASK_NONE; | |
121 | - | |
122 | - call_data = &data; | |
123 | - | |
124 | - for_each_cpu_mask(cpu, map) | |
125 | - smp_ext_bitcall(cpu, ec_call_function); | |
126 | - | |
127 | - /* Wait for response */ | |
128 | - while (!cpus_equal(map, data.started)) | |
129 | - cpu_relax(); | |
130 | - if (wait) | |
131 | - while (!cpus_equal(map, data.finished)) | |
132 | - cpu_relax(); | |
133 | -out: | |
134 | - if (local) { | |
135 | - local_irq_disable(); | |
136 | - func(info); | |
137 | - local_irq_enable(); | |
138 | - } | |
139 | -} | |
140 | - | |
141 | -/* | |
142 | - * smp_call_function: | |
143 | - * @func: the function to run; this must be fast and non-blocking | |
144 | - * @info: an arbitrary pointer to pass to the function | |
145 | - * @wait: if true, wait (atomically) until function has completed on other CPUs | |
146 | - * | |
147 | - * Run a function on all other CPUs. | |
148 | - * | |
149 | - * You must not call this function with disabled interrupts, from a | |
150 | - * hardware interrupt handler or from a bottom half. | |
151 | - */ | |
152 | -int smp_call_function(void (*func) (void *info), void *info, int wait) | |
153 | -{ | |
154 | - cpumask_t map; | |
155 | - | |
156 | - spin_lock(&call_lock); | |
157 | - map = cpu_online_map; | |
158 | - cpu_clear(smp_processor_id(), map); | |
159 | - __smp_call_function_map(func, info, wait, map); | |
160 | - spin_unlock(&call_lock); | |
161 | - return 0; | |
162 | -} | |
163 | -EXPORT_SYMBOL(smp_call_function); | |
164 | - | |
165 | -/* | |
166 | - * smp_call_function_single: | |
167 | - * @cpu: the CPU where func should run | |
168 | - * @func: the function to run; this must be fast and non-blocking | |
169 | - * @info: an arbitrary pointer to pass to the function | |
170 | - * @wait: if true, wait (atomically) until function has completed on other CPUs | |
171 | - * | |
172 | - * Run a function on one processor. | |
173 | - * | |
174 | - * You must not call this function with disabled interrupts, from a | |
175 | - * hardware interrupt handler or from a bottom half. | |
176 | - */ | |
177 | -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |
178 | - int wait) | |
179 | -{ | |
180 | - spin_lock(&call_lock); | |
181 | - __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu)); | |
182 | - spin_unlock(&call_lock); | |
183 | - return 0; | |
184 | -} | |
185 | -EXPORT_SYMBOL(smp_call_function_single); | |
186 | - | |
187 | -/** | |
188 | - * smp_call_function_mask(): Run a function on a set of other CPUs. | |
189 | - * @mask: The set of cpus to run on. Must not include the current cpu. | |
190 | - * @func: The function to run. This must be fast and non-blocking. | |
191 | - * @info: An arbitrary pointer to pass to the function. | |
192 | - * @wait: If true, wait (atomically) until function has completed on other CPUs. | |
193 | - * | |
194 | - * Returns 0 on success, else a negative status code. | |
195 | - * | |
196 | - * If @wait is true, then returns once @func has returned; otherwise | |
197 | - * it returns just before the target cpu calls @func. | |
198 | - * | |
199 | - * You must not call this function with disabled interrupts or from a | |
200 | - * hardware interrupt handler or from a bottom half handler. | |
201 | - */ | |
202 | -int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |
203 | - int wait) | |
204 | -{ | |
205 | - spin_lock(&call_lock); | |
206 | - cpu_clear(smp_processor_id(), mask); | |
207 | - __smp_call_function_map(func, info, wait, mask); | |
208 | - spin_unlock(&call_lock); | |
209 | - return 0; | |
210 | -} | |
211 | -EXPORT_SYMBOL(smp_call_function_mask); | |
212 | - | |
213 | void smp_send_stop(void) | |
214 | { | |
215 | int cpu, rc; | |
216 | @@ -273,7 +120,10 @@ static void do_ext_call_interrupt(__u16 | |
217 | bits = xchg(&S390_lowcore.ext_call_fast, 0); | |
218 | ||
219 | if (test_bit(ec_call_function, &bits)) | |
220 | - do_call_function(); | |
221 | + generic_smp_call_function_interrupt(); | |
222 | + | |
223 | + if (test_bit(ec_call_function_single, &bits)) | |
224 | + generic_smp_call_function_single_interrupt(); | |
225 | } | |
226 | ||
227 | /* | |
228 | @@ -290,6 +140,19 @@ static void smp_ext_bitcall(int cpu, ec_ | |
229 | udelay(10); | |
230 | } | |
231 | ||
232 | +void arch_send_call_function_ipi(cpumask_t mask) | |
233 | +{ | |
234 | + int cpu; | |
235 | + | |
236 | + for_each_cpu_mask(cpu, mask) | |
237 | + smp_ext_bitcall(cpu, ec_call_function); | |
238 | +} | |
239 | + | |
240 | +void arch_send_call_function_single_ipi(int cpu) | |
241 | +{ | |
242 | + smp_ext_bitcall(cpu, ec_call_function_single); | |
243 | +} | |
244 | + | |
245 | #ifndef CONFIG_64BIT | |
246 | /* | |
247 | * this function sends a 'purge tlb' signal to another CPU. | |
248 | @@ -588,9 +451,9 @@ int __cpuinit start_secondary(void *cpuv | |
249 | pfault_init(); | |
250 | ||
251 | /* Mark this cpu as online */ | |
252 | - spin_lock(&call_lock); | |
253 | + ipi_call_lock(); | |
254 | cpu_set(smp_processor_id(), cpu_online_map); | |
255 | - spin_unlock(&call_lock); | |
256 | + ipi_call_unlock(); | |
257 | /* Switch on interrupts */ | |
258 | local_irq_enable(); | |
259 | /* Print info about this processor */ |