From: jbeulich@novell.com
Subject: fold IPIs onto a single IRQ each
Patch-mainline: obsolete

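The Xen event-channel layer so far allocated one dynamic IRQ per IPI per
CPU. This patch folds each IPI onto a single IRQ shared by all CPUs, so
only NR_IPIS dynamic IRQs are consumed in total instead of NR_IPIS per
CPU: the IRQ is switched to handle_percpu_irq() and marked IRQ_PER_CPU
(so fixup_irqs() leaves it alone), and the per-CPU distinction moves into
a per-CPU ipi_to_evtchn[] table. Binding is done once per CPU via the new
bind_ipi_to_irqaction(), which reuses a single shared struct irqaction
instead of per-CPU name buffers, and senders go through the new
notify_remote_via_ipi() helper.

As a rough sketch (simplified from the drivers/xen/core/evtchn.c hunks
below; the real code reaches the table through evtchn_from_per_cpu_irq()
and the IRQ's index rather than indexing it directly), the new send path
amounts to:

	/* One event channel per (IPI, CPU) pair, filled in at bind time. */
	static DEFINE_PER_CPU(int[NR_IPIS], ipi_to_evtchn);

	void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
	{
		/* Look up the target CPU's event channel for this IPI... */
		int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];

		/* ...and signal it directly; no per-CPU IRQ is involved. */
		if (VALID_EVTCHN(evtchn))
			notify_remote_via_evtchn(evtchn);
	}
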
--- sle11-2009-06-04.orig/arch/x86/kernel/genapic_xen_64.c	2009-06-04 10:46:34.000000000 +0200
+++ sle11-2009-06-04/arch/x86/kernel/genapic_xen_64.c	2009-06-04 10:47:21.000000000 +0200
@@ -25,13 +25,13 @@
 #include <asm/genapic.h>
 #include <xen/evtchn.h>
 
-DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-
 static inline void __send_IPI_one(unsigned int cpu, int vector)
 {
-	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-	BUG_ON(irq < 0);
-	notify_remote_via_irq(irq);
+#ifdef CONFIG_SMP
+	notify_remote_via_ipi(vector, cpu);
+#else
+	BUG();
+#endif
 }
 
 static void xen_send_IPI_shortcut(unsigned int shortcut,
--- sle11-2009-06-04.orig/arch/x86/kernel/ipi-xen.c	2009-06-04 10:46:34.000000000 +0200
+++ sle11-2009-06-04/arch/x86/kernel/ipi-xen.c	2009-06-04 10:47:21.000000000 +0200
@@ -48,15 +48,6 @@ static inline int __prepare_ICR2(unsigne
 }
 #else
 #include <xen/evtchn.h>
-
-DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-
-static inline void __send_IPI_one(unsigned int cpu, int vector)
-{
-	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-	BUG_ON(irq < 0);
-	notify_remote_via_irq(irq);
-}
 #endif
 
 void __send_IPI_shortcut(unsigned int shortcut, int vector)
@@ -90,12 +81,12 @@ void __send_IPI_shortcut(unsigned int sh
 
 	switch (shortcut) {
 	case APIC_DEST_SELF:
-		__send_IPI_one(smp_processor_id(), vector);
+		notify_remote_via_ipi(vector, smp_processor_id());
 		break;
 	case APIC_DEST_ALLBUT:
 		for_each_online_cpu(cpu)
 			if (cpu != smp_processor_id())
-				__send_IPI_one(cpu, vector);
+				notify_remote_via_ipi(vector, cpu);
 		break;
 	default:
 		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
@@ -165,7 +156,7 @@ void send_IPI_mask_bitmask(const cpumask
 	WARN_ON(!cpus_subset(*cpumask, cpu_online_map));
 	for_each_online_cpu(cpu)
 		if (cpu_isset(cpu, *cpumask))
-			__send_IPI_one(cpu, vector);
+			notify_remote_via_ipi(vector, cpu);
 #endif
 	local_irq_restore(flags);
 }
--- sle11-2009-06-04.orig/arch/x86/kernel/irq_32-xen.c	2009-06-04 10:21:39.000000000 +0200
+++ sle11-2009-06-04/arch/x86/kernel/irq_32-xen.c	2009-06-04 10:47:21.000000000 +0200
@@ -404,6 +404,9 @@ void fixup_irqs(cpumask_t map)
 		if (irq == 2)
 			continue;
 
+		if (irq_desc[irq].status & IRQ_PER_CPU)
+			continue;
+
 		cpus_and(mask, irq_desc[irq].affinity, map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			/*printk("Breaking affinity for irq %i\n", irq);*/
--- sle11-2009-06-04.orig/arch/x86/kernel/irq_64-xen.c	2009-06-04 10:21:39.000000000 +0200
+++ sle11-2009-06-04/arch/x86/kernel/irq_64-xen.c	2009-06-04 10:47:21.000000000 +0200
@@ -245,6 +245,7 @@ void fixup_irqs(cpumask_t map)
 		spin_lock(&irq_desc[irq].lock);
 
 		if (!irq_has_action(irq) ||
+		    (irq_desc[irq].status & IRQ_PER_CPU) ||
 		    cpus_equal(irq_desc[irq].affinity, map)) {
 			spin_unlock(&irq_desc[irq].lock);
 			continue;
--- sle11-2009-06-04.orig/drivers/xen/Kconfig	2009-06-04 10:47:17.000000000 +0200
+++ sle11-2009-06-04/drivers/xen/Kconfig	2009-06-04 10:47:21.000000000 +0200
@@ -4,6 +4,7 @@
 
 config XEN
 	bool
+	select IRQ_PER_CPU if SMP
 
 if XEN
 config XEN_INTERFACE_VERSION
@@ -292,6 +293,9 @@ config HAVE_IRQ_IGNORE_UNHANDLED
 config GENERIC_HARDIRQS_NO__DO_IRQ
 	def_bool y
 
+config IRQ_PER_CPU
+	bool
+
 config NO_IDLE_HZ
 	def_bool y
 
--- sle11-2009-06-04.orig/drivers/xen/core/evtchn.c	2009-06-04 10:47:20.000000000 +0200
+++ sle11-2009-06-04/drivers/xen/core/evtchn.c	2009-06-04 10:47:21.000000000 +0200
@@ -58,6 +58,22 @@ static DEFINE_SPINLOCK(irq_mapping_updat
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ... NR_EVENT_CHANNELS-1] = -1 };
 
+/* IRQ <-> IPI mapping. */
+#ifndef NR_IPIS
+#define NR_IPIS 1
+#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_X86)
+static int ipi_to_irq[NR_IPIS] __read_mostly = {[0 ... NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int[NR_IPIS], ipi_to_evtchn);
+#else
+#define PER_CPU_IPI_IRQ
+#endif
+#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ)
+#define BUG_IF_IPI(irq) BUG_ON(type_from_irq(irq) == IRQT_IPI)
+#else
+#define BUG_IF_IPI(irq) ((void)(irq))
+#endif
+
 /* Packed IRQ information: binding type, sub-type index, and event channel. */
 static u32 irq_info[NR_IRQS];
 
@@ -98,10 +114,12 @@ static inline u32 mk_irq_info(u32 type,
  * Accessors for packed IRQ information.
  */
 
+#ifdef PER_CPU_IPI_IRQ
 static inline unsigned int evtchn_from_irq(int irq)
 {
 	return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
 }
+#endif
 
 static inline unsigned int index_from_irq(int irq)
 {
@@ -113,14 +131,28 @@ static inline unsigned int type_from_irq
 	return irq_info[irq] >> (32 - _IRQT_BITS);
 }
 
+#ifndef PER_CPU_IPI_IRQ
+static inline unsigned int evtchn_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
+{
+	BUG_ON(type_from_irq(irq) != IRQT_IPI);
+	return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)];
+}
+
+static inline unsigned int evtchn_from_irq(unsigned int irq)
+{
+	if (type_from_irq(irq) != IRQT_IPI)
+		return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
+	return evtchn_from_per_cpu_irq(irq, smp_processor_id());
+}
+#endif
+
 /* IRQ <-> VIRQ mapping. */
 DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 /* IRQ <-> IPI mapping. */
-#ifndef NR_IPIS
-#define NR_IPIS 1
-#endif
 DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
+#endif
 
 /* Reference counts for bindings to IRQs. */
 static int irq_bindcount[NR_IRQS];
@@ -145,8 +177,14 @@ static void bind_evtchn_to_cpu(unsigned
 
 	BUG_ON(!test_bit(chn, s->evtchn_mask));
 
-	if (irq != -1)
-		irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	if (irq != -1) {
+		struct irq_desc *desc = irq_desc + irq;
+
+		if (!(desc->status & IRQ_PER_CPU))
+			desc->affinity = cpumask_of_cpu(cpu);
+		else
+			cpu_set(cpu, desc->affinity);
+	}
 
 	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
 	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
@@ -315,14 +353,29 @@ asmlinkage void evtchn_do_upcall(struct
 	irq_exit();
 }
 
-static int find_unbound_irq(void)
+static struct irq_chip dynirq_chip;
+
+static int find_unbound_irq(bool percpu)
 {
 	static int warned;
 	int irq;
 
 	for (irq = DYNIRQ_BASE; irq < (DYNIRQ_BASE + NR_DYNIRQS); irq++)
-		if (irq_bindcount[irq] == 0)
+		if (irq_bindcount[irq] == 0) {
+			irq_flow_handler_t handle;
+			const char *name;
+
+			if (!percpu) {
+				handle = handle_level_irq;
+				name = "level";
+			} else {
+				handle = handle_percpu_irq;
+				name = "percpu";
+			}
+			set_irq_chip_and_handler_name(irq, &dynirq_chip,
+						      handle, name);
 			return irq;
+		}
 
 	if (!warned) {
 		warned = 1;
@@ -340,7 +393,7 @@ static int bind_caller_port_to_irq(unsig
 	spin_lock(&irq_mapping_update_lock);
 
 	if ((irq = evtchn_to_irq[caller_port]) == -1) {
-		if ((irq = find_unbound_irq()) < 0)
+		if ((irq = find_unbound_irq(false)) < 0)
 			goto out;
 
 		evtchn_to_irq[caller_port] = irq;
@@ -362,7 +415,7 @@ static int bind_local_port_to_irq(unsign
 
 	BUG_ON(evtchn_to_irq[local_port] != -1);
 
-	if ((irq = find_unbound_irq()) < 0) {
+	if ((irq = find_unbound_irq(false)) < 0) {
 		struct evtchn_close close = { .port = local_port };
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
 			BUG();
@@ -415,7 +468,7 @@ static int bind_virq_to_irq(unsigned int
 	spin_lock(&irq_mapping_update_lock);
 
 	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
-		if ((irq = find_unbound_irq()) < 0)
+		if ((irq = find_unbound_irq(false)) < 0)
 			goto out;
 
 		bind_virq.virq = virq;
@@ -440,6 +493,7 @@ static int bind_virq_to_irq(unsigned int
 	return irq;
 }
 
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
 	struct evtchn_bind_ipi bind_ipi;
@@ -448,7 +502,7 @@ static int bind_ipi_to_irq(unsigned int
 	spin_lock(&irq_mapping_update_lock);
 
 	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
-		if ((irq = find_unbound_irq()) < 0)
+		if ((irq = find_unbound_irq(false)) < 0)
 			goto out;
 
 		bind_ipi.vcpu = cpu;
@@ -471,6 +525,7 @@ static int bind_ipi_to_irq(unsigned int
 	spin_unlock(&irq_mapping_update_lock);
 	return irq;
 }
+#endif
 
 static void unbind_from_irq(unsigned int irq)
 {
@@ -478,6 +533,7 @@ static void unbind_from_irq(unsigned int
 	unsigned int cpu;
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_IPI(irq);
 	spin_lock(&irq_mapping_update_lock);
 
 	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
@@ -491,10 +547,12 @@ static void unbind_from_irq(unsigned int
 			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
 				[index_from_irq(irq)] = -1;
 			break;
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 		case IRQT_IPI:
 			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
 				[index_from_irq(irq)] = -1;
 			break;
+#endif
 		default:
 			break;
 		}
@@ -513,6 +571,46 @@ static void unbind_from_irq(unsigned int
 	spin_unlock(&irq_mapping_update_lock);
 }
 
+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
+{
+	struct evtchn_close close;
+	int evtchn = evtchn_from_per_cpu_irq(irq, cpu);
+
+	spin_lock(&irq_mapping_update_lock);
+
+	if (VALID_EVTCHN(evtchn)) {
+		struct irq_desc *desc = irq_desc + irq;
+
+		mask_evtchn(evtchn);
+
+		BUG_ON(irq_bindcount[irq] <= 1);
+		irq_bindcount[irq]--;
+		cpu_clear(cpu, desc->affinity);
+
+		close.port = evtchn;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
+			BUG();
+
+		switch (type_from_irq(irq)) {
+		case IRQT_IPI:
+			per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)] = 0;
+			break;
+		default:
+			BUG();
+			break;
+		}
+
+		/* Closed ports are implicitly re-bound to VCPU0. */
+		bind_evtchn_to_cpu(evtchn, 0);
+
+		evtchn_to_irq[evtchn] = -1;
+	}
+
+	spin_unlock(&irq_mapping_update_lock);
+}
+#endif /* CONFIG_SMP && !PER_CPU_IPI_IRQ */
+
 int bind_caller_port_to_irqhandler(
 	unsigned int caller_port,
 	irq_handler_t handler,
@@ -607,6 +705,8 @@ int bind_virq_to_irqhandler(
 }
 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
 
+#ifdef CONFIG_SMP
+#ifdef PER_CPU_IPI_IRQ
 int bind_ipi_to_irqhandler(
 	unsigned int ipi,
 	unsigned int cpu,
@@ -629,7 +729,71 @@ int bind_ipi_to_irqhandler(
 
 	return irq;
 }
-EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
+#else
+int __cpuinit bind_ipi_to_irqaction(
+	unsigned int ipi,
+	unsigned int cpu,
+	struct irqaction *action)
+{
+	struct evtchn_bind_ipi bind_ipi;
+	int evtchn, irq, retval = 0;
+
+	spin_lock(&irq_mapping_update_lock);
+
+	if (VALID_EVTCHN(per_cpu(ipi_to_evtchn, cpu)[ipi])) {
+		spin_unlock(&irq_mapping_update_lock);
+		return -EBUSY;
+	}
+
+	if ((irq = ipi_to_irq[ipi]) == -1) {
+		if ((irq = find_unbound_irq(true)) < 0) {
+			spin_unlock(&irq_mapping_update_lock);
+			return irq;
+		}
+
+		/* Extra reference so count will never drop to zero. */
+		irq_bindcount[irq]++;
+
+		ipi_to_irq[ipi] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, 0);
+		retval = 1;
+	}
+
+	bind_ipi.vcpu = cpu;
+	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+					&bind_ipi) != 0)
+		BUG();
+
+	evtchn = bind_ipi.port;
+	evtchn_to_irq[evtchn] = irq;
+	per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+
+	bind_evtchn_to_cpu(evtchn, cpu);
+
+	irq_bindcount[irq]++;
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	if (retval == 0) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		unmask_evtchn(evtchn);
+		local_irq_restore(flags);
+	} else {
+		action->flags |= IRQF_PERCPU;
+		retval = setup_irq(irq, action);
+		if (retval) {
+			unbind_from_per_cpu_irq(irq, cpu);
+			BUG_ON(retval > 0);
+			irq = retval;
+		}
+	}
+
+	return irq;
+}
+#endif /* PER_CPU_IPI_IRQ */
+#endif /* CONFIG_SMP */
 
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
@@ -655,6 +819,7 @@ static void rebind_irq_to_cpu(unsigned i
 {
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_IPI(irq);
 	if (VALID_EVTCHN(evtchn))
 		rebind_evtchn_to_cpu(evtchn, tcpu);
 }
@@ -739,6 +904,7 @@ static struct irq_chip dynirq_chip = {
 	.unmask = unmask_dynirq,
 	.mask_ack = ack_dynirq,
 	.ack = ack_dynirq,
+	.eoi = end_dynirq,
 	.end = end_dynirq,
 #ifdef CONFIG_SMP
 	.set_affinity = set_affinity_irq,
@@ -918,10 +1084,21 @@ int irq_ignore_unhandled(unsigned int ir
 	return !!(irq_status.flags & XENIRQSTAT_shared);
 }
 
+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
+{
+	int evtchn = evtchn_from_per_cpu_irq(ipi_to_irq[ipi], cpu);
+
+	if (VALID_EVTCHN(evtchn))
+		notify_remote_via_evtchn(evtchn);
+}
+#endif
+
 void notify_remote_via_irq(int irq)
 {
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_IPI(irq);
 	if (VALID_EVTCHN(evtchn))
 		notify_remote_via_evtchn(evtchn);
 }
@@ -929,6 +1106,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq)
 
 int irq_to_evtchn_port(int irq)
 {
+	BUG_IF_IPI(irq);
 	return evtchn_from_irq(irq);
 }
 EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
@@ -1044,11 +1222,17 @@ static void restore_cpu_virqs(unsigned i
 
 static void restore_cpu_ipis(unsigned int cpu)
 {
+#ifdef CONFIG_SMP
 	struct evtchn_bind_ipi bind_ipi;
 	int ipi, irq, evtchn;
 
 	for (ipi = 0; ipi < NR_IPIS; ipi++) {
+#ifdef PER_CPU_IPI_IRQ
 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+#else
		if ((irq = ipi_to_irq[ipi]) == -1
+		    || !VALID_EVTCHN(per_cpu(ipi_to_evtchn, cpu)[ipi]))
+#endif
 			continue;
 
 		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
@@ -1062,13 +1246,18 @@ static void restore_cpu_ipis(unsigned in
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
+#ifdef PER_CPU_IPI_IRQ
 		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+#else
+		per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+#endif
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
 		if (!(irq_desc[irq].status & IRQ_DISABLED))
 			unmask_evtchn(evtchn);
 	}
+#endif
 }
 
 static int evtchn_resume(struct sys_device *dev)
@@ -1239,8 +1428,6 @@ void __init xen_init_IRQ(void)
 		irq_bindcount[i] = 0;
 
 		irq_desc[i].status |= IRQ_NOPROBE;
-		set_irq_chip_and_handler_name(i, &dynirq_chip,
-					      handle_level_irq, "level");
 	}
 
 	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
--- sle11-2009-06-04.orig/drivers/xen/core/smpboot.c	2009-06-04 10:47:07.000000000 +0200
+++ sle11-2009-06-04/drivers/xen/core/smpboot.c	2009-06-04 10:47:21.000000000 +0200
@@ -49,12 +49,9 @@ cpumask_t cpu_initialized_map;
 DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, call1func_irq);
-static char resched_name[NR_CPUS][15];
-static char callfunc_name[NR_CPUS][15];
-static char call1func_name[NR_CPUS][15];
+static int __read_mostly resched_irq = -1;
+static int __read_mostly callfunc_irq = -1;
+static int __read_mostly call1func_irq = -1;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid))
@@ -109,47 +106,54 @@ remove_siblinginfo(unsigned int cpu)
 
 static int __cpuinit xen_smp_intr_init(unsigned int cpu)
 {
+	static struct irqaction resched_action = {
+		.handler = smp_reschedule_interrupt,
+		.flags = IRQF_DISABLED,
+		.name = "resched"
+	}, callfunc_action = {
+		.handler = smp_call_function_interrupt,
+		.flags = IRQF_DISABLED,
+		.name = "callfunc"
+	}, call1func_action = {
+		.handler = smp_call_function_single_interrupt,
+		.flags = IRQF_DISABLED,
+		.name = "call1func"
+	};
 	int rc;
 
-	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) =
-		per_cpu(call1func_irq, cpu) = -1;
-
-	sprintf(resched_name[cpu], "resched%u", cpu);
-	rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
-				    cpu,
-				    smp_reschedule_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    resched_name[cpu],
-				    NULL);
+	rc = bind_ipi_to_irqaction(RESCHEDULE_VECTOR,
+				   cpu,
+				   &resched_action);
 	if (rc < 0)
-		goto fail;
-	per_cpu(resched_irq, cpu) = rc;
-
-	sprintf(callfunc_name[cpu], "callfunc%u", cpu);
-	rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
-				    cpu,
-				    smp_call_function_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    callfunc_name[cpu],
-				    NULL);
+		return rc;
+	if (resched_irq < 0)
+		resched_irq = rc;
+	else
+		BUG_ON(resched_irq != rc);
+
+	rc = bind_ipi_to_irqaction(CALL_FUNCTION_VECTOR,
+				   cpu,
+				   &callfunc_action);
 	if (rc < 0)
-		goto fail;
-	per_cpu(callfunc_irq, cpu) = rc;
-
-	sprintf(call1func_name[cpu], "call1func%u", cpu);
-	rc = bind_ipi_to_irqhandler(CALL_FUNC_SINGLE_VECTOR,
-				    cpu,
-				    smp_call_function_single_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    call1func_name[cpu],
-				    NULL);
+		goto unbind_resched;
+	if (callfunc_irq < 0)
+		callfunc_irq = rc;
+	else
+		BUG_ON(callfunc_irq != rc);
+
+	rc = bind_ipi_to_irqaction(CALL_FUNC_SINGLE_VECTOR,
+				   cpu,
+				   &call1func_action);
 	if (rc < 0)
-		goto fail;
-	per_cpu(call1func_irq, cpu) = rc;
+		goto unbind_call;
+	if (call1func_irq < 0)
+		call1func_irq = rc;
+	else
+		BUG_ON(call1func_irq != rc);
 
 	rc = xen_spinlock_init(cpu);
 	if (rc < 0)
-		goto fail;
+		goto unbind_call1;
 
 	if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
 		goto fail;
@@ -157,13 +161,13 @@ static int __cpuinit xen_smp_intr_init(u
 	return 0;
 
  fail:
-	if (per_cpu(resched_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	if (per_cpu(callfunc_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	if (per_cpu(call1func_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
 	xen_spinlock_cleanup(cpu);
+ unbind_call1:
+	unbind_from_per_cpu_irq(call1func_irq, cpu);
+ unbind_call:
+	unbind_from_per_cpu_irq(callfunc_irq, cpu);
+ unbind_resched:
+	unbind_from_per_cpu_irq(resched_irq, cpu);
 	return rc;
 }
 
@@ -173,9 +177,9 @@ static void __cpuinit xen_smp_intr_exit(
 	if (cpu != 0)
 		local_teardown_timer(cpu);
 
-	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
+	unbind_from_per_cpu_irq(resched_irq, cpu);
+	unbind_from_per_cpu_irq(callfunc_irq, cpu);
+	unbind_from_per_cpu_irq(call1func_irq, cpu);
 	xen_spinlock_cleanup(cpu);
 }
 #endif
--- sle11-2009-06-04.orig/drivers/xen/core/spinlock.c	2009-06-04 10:36:24.000000000 +0200
+++ sle11-2009-06-04/drivers/xen/core/spinlock.c	2009-06-04 10:47:21.000000000 +0200
@@ -16,8 +16,7 @@
 
 extern irqreturn_t smp_reschedule_interrupt(int, void *);
 
-static DEFINE_PER_CPU(int, spinlock_irq) = -1;
-static char spinlock_name[NR_CPUS][15];
+static int __read_mostly spinlock_irq = -1;
 
 struct spinning {
 	raw_spinlock_t *lock;
@@ -34,34 +33,36 @@ static DEFINE_PER_CPU(raw_rwlock_t, spin
 
 int __cpuinit xen_spinlock_init(unsigned int cpu)
 {
+	static struct irqaction spinlock_action = {
+		.handler = smp_reschedule_interrupt,
+		.flags = IRQF_DISABLED,
+		.name = "spinlock"
+	};
 	int rc;
 
-	sprintf(spinlock_name[cpu], "spinlock%u", cpu);
-	rc = bind_ipi_to_irqhandler(SPIN_UNLOCK_VECTOR,
-				    cpu,
-				    smp_reschedule_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    spinlock_name[cpu],
-				    NULL);
+	rc = bind_ipi_to_irqaction(SPIN_UNLOCK_VECTOR,
+				   cpu,
+				   &spinlock_action);
 	if (rc < 0)
 		return rc;
 
-	disable_irq(rc); /* make sure it's never delivered */
-	per_cpu(spinlock_irq, cpu) = rc;
+	if (spinlock_irq < 0) {
+		disable_irq(rc); /* make sure it's never delivered */
+		spinlock_irq = rc;
+	} else
+		BUG_ON(spinlock_irq != rc);
 
 	return 0;
 }
 
 void __cpuinit xen_spinlock_cleanup(unsigned int cpu)
 {
-	if (per_cpu(spinlock_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(spinlock_irq, cpu), NULL);
-	per_cpu(spinlock_irq, cpu) = -1;
+	unbind_from_per_cpu_irq(spinlock_irq, cpu);
 }
 
 int xen_spin_wait(raw_spinlock_t *lock, unsigned int token)
 {
-	int rc = 0, irq = __get_cpu_var(spinlock_irq);
+	int rc = 0, irq = spinlock_irq;
 	raw_rwlock_t *rm_lock;
 	unsigned long flags;
 	struct spinning spinning;
@@ -155,7 +156,7 @@ void xen_spin_kick(raw_spinlock_t *lock,
 	raw_local_irq_restore(flags);
 
 	if (unlikely(spinning)) {
-		notify_remote_via_irq(per_cpu(spinlock_irq, cpu));
+		notify_remote_via_ipi(SPIN_UNLOCK_VECTOR, cpu);
 		return;
 	}
 }
--- sle11-2009-06-04.orig/include/xen/evtchn.h	2009-06-04 10:47:20.000000000 +0200
+++ sle11-2009-06-04/include/xen/evtchn.h	2009-06-04 10:47:21.000000000 +0200
@@ -78,6 +78,8 @@ int bind_virq_to_irqhandler(
 	unsigned long irqflags,
 	const char *devname,
 	void *dev_id);
+#if defined(CONFIG_SMP) && !defined(MODULE)
+#ifndef CONFIG_X86
 int bind_ipi_to_irqhandler(
 	unsigned int ipi,
 	unsigned int cpu,
@@ -85,6 +87,13 @@ int bind_ipi_to_irqhandler(
 	unsigned long irqflags,
 	const char *devname,
 	void *dev_id);
+#else
+int bind_ipi_to_irqaction(
+	unsigned int ipi,
+	unsigned int cpu,
+	struct irqaction *action);
+#endif
+#endif
 
 /*
  * Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -93,6 +102,11 @@ int bind_ipi_to_irqhandler(
  */
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 
+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+/* Specialized unbind function for per-CPU IRQs. */
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu);
+#endif
+
 #ifndef CONFIG_XEN
 void irq_resume(void);
 #endif
@@ -184,4 +198,8 @@ int clear_pirq_hw_action(int pirq);
 #define PIRQ_END 5
 #define PIRQ_ACK 6
 
+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu);
+#endif
+
 #endif /* __ASM_EVTCHN_H__ */