From: jbeulich@novell.com
Subject: fold IPIs onto a single IRQ each
Patch-mainline: obsolete

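Rather than binding one IRQ per CPU for each IPI vector, use a single
global IRQ per IPI vector and keep only the event channels per-CPU
(tracked in ipi_to_evtchn). The IRQ is set up once with
handle_percpu_irq and marked IRQ_PER_CPU/IRQF_PERCPU, so fixup_irqs()
leaves it alone; each further CPU merely binds its own event channel to
the already-existing IRQ. On x86 this replaces bind_ipi_to_irqhandler()
with bind_ipi_to_irqaction()/unbind_from_per_cpu_irq(), and senders use
notify_remote_via_ipi() instead of looking up a per-CPU IRQ for
notify_remote_via_irq().

A minimal usage sketch of the new interface, mirroring the smpboot.c
changes below (MY_VECTOR, my_ipi_interrupt, my_cpu_init and my_kick are
illustrative placeholders, not part of this patch):

    static irqreturn_t my_ipi_interrupt(int irq, void *dev_id)
    {
    	return IRQ_HANDLED;
    }

    /* One static, shared irqaction: the first CPU to bind performs
     * setup_irq() on the single IRQ; later CPUs only get their own
     * event channel bound and unmasked. */
    static struct irqaction my_ipi_action = {
    	.handler = my_ipi_interrupt,
    	.flags   = IRQF_DISABLED,
    	.name    = "my-ipi"
    };

    static int __cpuinit my_cpu_init(unsigned int cpu)
    {
    	/* Returns the (shared) IRQ on success, negative errno on error. */
    	int rc = bind_ipi_to_irqaction(MY_VECTOR, cpu, &my_ipi_action);

    	return rc < 0 ? rc : 0;
    }

    static void my_kick(unsigned int cpu)
    {
    	/* No per-CPU IRQ lookup needed on the sending side. */
    	notify_remote_via_ipi(MY_VECTOR, cpu);
    }
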
Index: head-2008-12-01/arch/x86/kernel/genapic_xen_64.c
===================================================================
--- head-2008-12-01.orig/arch/x86/kernel/genapic_xen_64.c 2008-11-25 13:12:11.000000000 +0100
+++ head-2008-12-01/arch/x86/kernel/genapic_xen_64.c 2008-12-01 12:07:34.000000000 +0100
@@ -25,13 +25,9 @@
 #include <asm/genapic.h>
 #include <xen/evtchn.h>

-DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-
 static inline void __send_IPI_one(unsigned int cpu, int vector)
 {
- int irq = per_cpu(ipi_to_irq, cpu)[vector];
- BUG_ON(irq < 0);
- notify_remote_via_irq(irq);
+ notify_remote_via_ipi(vector, cpu);
 }

 static void xen_send_IPI_shortcut(unsigned int shortcut,
Index: head-2008-12-01/arch/x86/kernel/ipi-xen.c
===================================================================
--- head-2008-12-01.orig/arch/x86/kernel/ipi-xen.c 2008-11-25 13:12:11.000000000 +0100
+++ head-2008-12-01/arch/x86/kernel/ipi-xen.c 2008-12-01 12:07:34.000000000 +0100
@@ -48,15 +48,6 @@ static inline int __prepare_ICR2(unsigne
 }
 #else
 #include <xen/evtchn.h>
-
-DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-
-static inline void __send_IPI_one(unsigned int cpu, int vector)
-{
- int irq = per_cpu(ipi_to_irq, cpu)[vector];
- BUG_ON(irq < 0);
- notify_remote_via_irq(irq);
-}
 #endif

 void __send_IPI_shortcut(unsigned int shortcut, int vector)
@@ -90,12 +81,12 @@ void __send_IPI_shortcut(unsigned int sh

 switch (shortcut) {
 case APIC_DEST_SELF:
- __send_IPI_one(smp_processor_id(), vector);
+ notify_remote_via_ipi(vector, smp_processor_id());
 break;
 case APIC_DEST_ALLBUT:
 for_each_online_cpu(cpu)
 if (cpu != smp_processor_id())
- __send_IPI_one(cpu, vector);
+ notify_remote_via_ipi(vector, cpu);
 break;
 default:
 printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
@@ -165,7 +156,7 @@ void send_IPI_mask_bitmask(const cpumask
 WARN_ON(!cpus_empty(mask));
 for_each_online_cpu(cpu)
 if (cpu_isset(cpu, cpumask))
- __send_IPI_one(cpu, vector);
+ notify_remote_via_ipi(vector, cpu);
 #endif
 local_irq_restore(flags);
 }
Index: head-2008-12-01/arch/x86/kernel/irq_32-xen.c
===================================================================
--- head-2008-12-01.orig/arch/x86/kernel/irq_32-xen.c 2008-12-01 11:49:07.000000000 +0100
+++ head-2008-12-01/arch/x86/kernel/irq_32-xen.c 2008-12-01 12:07:34.000000000 +0100
@@ -404,6 +404,9 @@ void fixup_irqs(cpumask_t map)
 if (irq == 2)
 continue;

+ if (irq_desc[irq].status & IRQ_PER_CPU)
+ continue;
+
 cpus_and(mask, irq_desc[irq].affinity, map);
 if (any_online_cpu(mask) == NR_CPUS) {
 /*printk("Breaking affinity for irq %i\n", irq);*/
Index: head-2008-12-01/arch/x86/kernel/irq_64-xen.c
===================================================================
--- head-2008-12-01.orig/arch/x86/kernel/irq_64-xen.c 2008-12-01 11:49:07.000000000 +0100
+++ head-2008-12-01/arch/x86/kernel/irq_64-xen.c 2008-12-01 12:07:34.000000000 +0100
@@ -245,6 +245,7 @@ void fixup_irqs(cpumask_t map)
 spin_lock(&irq_desc[irq].lock);

 if (!irq_has_action(irq) ||
+ (irq_desc[irq].status & IRQ_PER_CPU) ||
 cpus_equal(irq_desc[irq].affinity, map)) {
 spin_unlock(&irq_desc[irq].lock);
 continue;
Index: head-2008-12-01/drivers/xen/Kconfig
===================================================================
--- head-2008-12-01.orig/drivers/xen/Kconfig 2008-10-24 10:52:17.000000000 +0200
+++ head-2008-12-01/drivers/xen/Kconfig 2008-12-01 12:07:34.000000000 +0100
@@ -4,6 +4,7 @@

 config XEN
 bool
+ select IRQ_PER_CPU if SMP

 if XEN
 config XEN_INTERFACE_VERSION
@@ -292,6 +293,9 @@ config HAVE_IRQ_IGNORE_UNHANDLED
 config GENERIC_HARDIRQS_NO__DO_IRQ
 def_bool y

+config IRQ_PER_CPU
+ bool
+
 config NO_IDLE_HZ
 def_bool y

Index: head-2008-12-01/drivers/xen/core/evtchn.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/core/evtchn.c 2008-12-02 09:14:14.000000000 +0100
+++ head-2008-12-01/drivers/xen/core/evtchn.c 2008-12-02 09:14:29.000000000 +0100
@@ -57,6 +57,22 @@ static DEFINE_SPINLOCK(irq_mapping_updat
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 [0 ... NR_EVENT_CHANNELS-1] = -1 };

+/* IRQ <-> IPI mapping. */
+#ifndef NR_IPIS
+#define NR_IPIS 1
+#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_X86)
+static int ipi_to_irq[NR_IPIS] __read_mostly = {[0 ... NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int[NR_IPIS], ipi_to_evtchn) = {[0 ... NR_IPIS-1] = -1};
+#else
+#define PER_CPU_IPI_IRQ
+#endif
+#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ)
+#define BUG_IF_IPI(irq) BUG_ON(type_from_irq(irq) == IRQT_IPI)
+#else
+#define BUG_IF_IPI(irq) ((void)(irq))
+#endif
+
 /* Packed IRQ information: binding type, sub-type index, and event channel. */
 static u32 irq_info[NR_IRQS];

@@ -97,10 +113,12 @@ static inline u32 mk_irq_info(u32 type,
 * Accessors for packed IRQ information.
 */

+#ifdef PER_CPU_IPI_IRQ
 static inline unsigned int evtchn_from_irq(int irq)
 {
 return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
 }
+#endif

 static inline unsigned int index_from_irq(int irq)
 {
@@ -112,14 +130,28 @@ static inline unsigned int type_from_irq
 return irq_info[irq] >> (32 - _IRQT_BITS);
 }

+#ifndef PER_CPU_IPI_IRQ
+static inline unsigned int evtchn_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
+{
+ BUG_ON(type_from_irq(irq) != IRQT_IPI);
+ return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)];
+}
+
+static inline unsigned int evtchn_from_irq(unsigned int irq)
+{
+ if (type_from_irq(irq) != IRQT_IPI)
+ return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
+ return evtchn_from_per_cpu_irq(irq, smp_processor_id());
+}
+#endif
+
 /* IRQ <-> VIRQ mapping. */
 DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 /* IRQ <-> IPI mapping. */
-#ifndef NR_IPIS
-#define NR_IPIS 1
-#endif
 DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
+#endif

 /* Reference counts for bindings to IRQs. */
 static int irq_bindcount[NR_IRQS];
@@ -144,8 +176,14 @@ static void bind_evtchn_to_cpu(unsigned

 BUG_ON(!test_bit(chn, s->evtchn_mask));

- if (irq != -1)
- irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+ if (irq != -1) {
+ struct irq_desc *desc = irq_desc + irq;
+
+ if (!(desc->status & IRQ_PER_CPU))
+ desc->affinity = cpumask_of_cpu(cpu);
+ else
+ cpu_set(cpu, desc->affinity);
+ }

 clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
 set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
@@ -439,6 +477,7 @@ static int bind_virq_to_irq(unsigned int
 return irq;
 }

+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
 struct evtchn_bind_ipi bind_ipi;
@@ -470,6 +509,7 @@ bind_ipi_to_irq(unsigned int
 spin_unlock(&irq_mapping_update_lock);
 return irq;
 }
+#endif

 static void unbind_from_irq(unsigned int irq)
 {
@@ -477,6 +517,7 @@ static void unbind_from_irq(unsigned int
 unsigned int cpu;
 int evtchn = evtchn_from_irq(irq);

+ BUG_IF_IPI(irq);
 spin_lock(&irq_mapping_update_lock);

 if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
@@ -490,10 +531,12 @@ static void unbind_from_irq(unsigned int
 per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
 [index_from_irq(irq)] = -1;
 break;
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 case IRQT_IPI:
 per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
 [index_from_irq(irq)] = -1;
 break;
+#endif
 default:
 break;
 }
@@ -512,6 +555,46 @@ static void unbind_from_irq(unsigned int
 spin_unlock(&irq_mapping_update_lock);
 }

+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
+{
+ struct evtchn_close close;
+ int evtchn = evtchn_from_per_cpu_irq(irq, cpu);
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if (VALID_EVTCHN(evtchn)) {
+ struct irq_desc *desc = irq_desc + irq;
+
+ mask_evtchn(evtchn);
+
+ BUG_ON(irq_bindcount[irq] <= 1);
+ irq_bindcount[irq]--;
+ cpu_clear(cpu, desc->affinity);
+
+ close.port = evtchn;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
+ BUG();
+
+ switch (type_from_irq(irq)) {
+ case IRQT_IPI:
+ per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)] = -1;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ /* Closed ports are implicitly re-bound to VCPU0. */
+ bind_evtchn_to_cpu(evtchn, 0);
+
+ evtchn_to_irq[evtchn] = -1;
+ }
+
+ spin_unlock(&irq_mapping_update_lock);
+}
+#endif /* CONFIG_SMP && !PER_CPU_IPI_IRQ */
+
 int bind_caller_port_to_irqhandler(
 unsigned int caller_port,
 irq_handler_t handler,
@@ -606,6 +689,8 @@ int bind_virq_to_irqhandler(
 }
 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

+#ifdef CONFIG_SMP
+#ifdef PER_CPU_IPI_IRQ
 int bind_ipi_to_irqhandler(
 unsigned int ipi,
 unsigned int cpu,
@@ -628,7 +713,72 @@ int bind_ipi_to_irqhandler(

 return irq;
 }
-EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
+#else
+int __cpuinit bind_ipi_to_irqaction(
+ unsigned int ipi,
+ unsigned int cpu,
+ struct irqaction *action)
+{
+ struct evtchn_bind_ipi bind_ipi;
+ int evtchn, irq, retval = 0;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if (per_cpu(ipi_to_evtchn, cpu)[ipi] != -1) {
+ spin_unlock(&irq_mapping_update_lock);
+ return -EBUSY;
+ }
+
+ if ((irq = ipi_to_irq[ipi]) == -1) {
+ if ((irq = find_unbound_irq()) < 0) {
+ spin_unlock(&irq_mapping_update_lock);
+ return irq;
+ }
+
+ /* Extra reference so count will never drop to zero. */
+ irq_bindcount[irq]++;
+
+ ipi_to_irq[ipi] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, 0);
+ irq_desc[irq].handle_irq = handle_percpu_irq;
+ retval = 1;
+ }
+
+ bind_ipi.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+ &bind_ipi) != 0)
+ BUG();
+
+ evtchn = bind_ipi.port;
+ evtchn_to_irq[evtchn] = irq;
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+
+ bind_evtchn_to_cpu(evtchn, cpu);
+
+ irq_bindcount[irq]++;
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ if (retval == 0) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ unmask_evtchn(evtchn);
+ local_irq_restore(flags);
+ } else {
+ action->flags |= IRQF_PERCPU;
+ retval = setup_irq(irq, action);
+ if (retval) {
+ unbind_from_per_cpu_irq(irq, cpu);
+ BUG_ON(retval > 0);
+ irq = retval;
+ }
+ }
+
+ return irq;
+}
+#endif /* PER_CPU_IPI_IRQ */
+#endif /* CONFIG_SMP */

 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
@@ -654,6 +804,7 @@ static void rebind_irq_to_cpu(unsigned i
 {
 int evtchn = evtchn_from_irq(irq);

+ BUG_IF_IPI(irq);
 if (VALID_EVTCHN(evtchn))
 rebind_evtchn_to_cpu(evtchn, tcpu);
 }
@@ -737,6 +888,7 @@ static struct irq_chip dynirq_chip = {
 .unmask = unmask_dynirq,
 .mask_ack = ack_dynirq,
 .ack = ack_dynirq,
+ .eoi = end_dynirq,
 .end = end_dynirq,
 #ifdef CONFIG_SMP
 .set_affinity = set_affinity_irq,
@@ -909,10 +1061,21 @@ int irq_ignore_unhandled(unsigned int ir
 return !!(irq_status.flags & XENIRQSTAT_shared);
 }

+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
+{
+ int evtchn = evtchn_from_per_cpu_irq(ipi_to_irq[ipi], cpu);
+
+ if (VALID_EVTCHN(evtchn))
+ notify_remote_via_evtchn(evtchn);
+}
+#endif
+
 void notify_remote_via_irq(int irq)
 {
 int evtchn = evtchn_from_irq(irq);

+ BUG_IF_IPI(irq);
 if (VALID_EVTCHN(evtchn))
 notify_remote_via_evtchn(evtchn);
 }
@@ -920,6 +1083,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq)

 int irq_to_evtchn_port(int irq)
 {
+ BUG_IF_IPI(irq);
 return evtchn_from_irq(irq);
 }
 EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
@@ -1035,11 +1199,16 @@ static void restore_cpu_virqs(unsigned i

 static void restore_cpu_ipis(unsigned int cpu)
 {
+#ifdef CONFIG_SMP
 struct evtchn_bind_ipi bind_ipi;
 int ipi, irq, evtchn;

 for (ipi = 0; ipi < NR_IPIS; ipi++) {
+#ifdef PER_CPU_IPI_IRQ
 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+#else
+ if ((irq = ipi_to_irq[ipi]) == -1)
+#endif
 continue;

 BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
@@ -1053,13 +1222,17 @@ static void restore_cpu_ipis(unsigned in

 /* Record the new mapping. */
 evtchn_to_irq[evtchn] = irq;
+#ifdef PER_CPU_IPI_IRQ
 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+#else
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+#endif
 bind_evtchn_to_cpu(evtchn, cpu);

 /* Ready for use. */
 unmask_evtchn(evtchn);
-
 }
+#endif
 }

 static int evtchn_resume(struct sys_device *dev)
@@ -1103,8 +1276,17 @@ static int evtchn_resume(struct sys_devi

 for_each_possible_cpu(cpu) {
 restore_cpu_virqs(cpu);
+#ifdef PER_CPU_IPI_IRQ
 restore_cpu_ipis(cpu);
+#else
+ /* No IPI <-> event-channel mappings. */
+ for (irq = 0; irq < NR_IPIS; ++irq)
+ per_cpu(ipi_to_evtchn, cpu)[irq] = -1;
+#endif
 }
+#ifndef PER_CPU_IPI_IRQ
+ restore_cpu_ipis(smp_processor_id());
+#endif

 return 0;
 }
Index: head-2008-12-01/drivers/xen/core/smpboot.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/core/smpboot.c 2008-12-01 12:07:15.000000000 +0100
+++ head-2008-12-01/drivers/xen/core/smpboot.c 2008-12-01 12:07:34.000000000 +0100
@@ -53,12 +53,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 #endif

-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, call1func_irq);
-static char resched_name[NR_CPUS][15];
-static char callfunc_name[NR_CPUS][15];
-static char call1func_name[NR_CPUS][15];
+static int __read_mostly resched_irq = -1;
+static int __read_mostly callfunc_irq = -1;
+static int __read_mostly call1func_irq = -1;

 #ifdef CONFIG_X86_LOCAL_APIC
 #define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid))
@@ -117,43 +114,50 @@ remove_siblinginfo(unsigned int cpu)

 static int __cpuinit xen_smp_intr_init(unsigned int cpu)
 {
+ static struct irqaction resched_action = {
+ .handler = smp_reschedule_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "resched"
+ }, callfunc_action = {
+ .handler = smp_call_function_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "callfunc"
+ }, call1func_action = {
+ .handler = smp_call_function_single_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "call1func"
+ };
 int rc;

- per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) =
- per_cpu(call1func_irq, cpu) = -1;
-
- sprintf(resched_name[cpu], "resched%u", cpu);
- rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
- cpu,
- smp_reschedule_interrupt,
- IRQF_DISABLED|IRQF_NOBALANCING,
- resched_name[cpu],
- NULL);
+ rc = bind_ipi_to_irqaction(RESCHEDULE_VECTOR,
+ cpu,
+ &resched_action);
 if (rc < 0)
 goto fail;
- per_cpu(resched_irq, cpu) = rc;
-
- sprintf(callfunc_name[cpu], "callfunc%u", cpu);
- rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
- cpu,
- smp_call_function_interrupt,
- IRQF_DISABLED|IRQF_NOBALANCING,
- callfunc_name[cpu],
- NULL);
+ if (resched_irq < 0)
+ resched_irq = rc;
+ else
+ BUG_ON(resched_irq != rc);
+
+ rc = bind_ipi_to_irqaction(CALL_FUNCTION_VECTOR,
+ cpu,
+ &callfunc_action);
 if (rc < 0)
 goto fail;
- per_cpu(callfunc_irq, cpu) = rc;
-
- sprintf(call1func_name[cpu], "call1func%u", cpu);
- rc = bind_ipi_to_irqhandler(CALL_FUNC_SINGLE_VECTOR,
- cpu,
- smp_call_function_single_interrupt,
- IRQF_DISABLED|IRQF_NOBALANCING,
- call1func_name[cpu],
- NULL);
+ if (callfunc_irq < 0)
+ callfunc_irq = rc;
+ else
+ BUG_ON(callfunc_irq != rc);
+
+ rc = bind_ipi_to_irqaction(CALL_FUNC_SINGLE_VECTOR,
+ cpu,
+ &call1func_action);
 if (rc < 0)
 goto fail;
- per_cpu(call1func_irq, cpu) = rc;
+ if (call1func_irq < 0)
+ call1func_irq = rc;
+ else
+ BUG_ON(call1func_irq != rc);

 rc = xen_spinlock_init(cpu);
 if (rc < 0)
@@ -165,12 +169,12 @@ static int __cpuinit xen_smp_intr_init(u
 return 0;

 fail:
- if (per_cpu(resched_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
- if (per_cpu(callfunc_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
- if (per_cpu(call1func_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
+ if (resched_irq >= 0)
+ unbind_from_per_cpu_irq(resched_irq, cpu);
+ if (callfunc_irq >= 0)
+ unbind_from_per_cpu_irq(callfunc_irq, cpu);
+ if (call1func_irq >= 0)
+ unbind_from_per_cpu_irq(call1func_irq, cpu);
 xen_spinlock_cleanup(cpu);
 return rc;
 }
@@ -181,9 +185,9 @@ static void __cpuinit xen_smp_intr_exit(
 if (cpu != 0)
 local_teardown_timer(cpu);

- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
+ unbind_from_per_cpu_irq(resched_irq, cpu);
+ unbind_from_per_cpu_irq(callfunc_irq, cpu);
+ unbind_from_per_cpu_irq(call1func_irq, cpu);
 xen_spinlock_cleanup(cpu);
 }
 #endif
Index: head-2008-12-01/drivers/xen/core/spinlock.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/core/spinlock.c 2008-12-01 11:51:53.000000000 +0100
+++ head-2008-12-01/drivers/xen/core/spinlock.c 2008-12-01 12:07:34.000000000 +0100
@@ -14,8 +14,7 @@

 extern irqreturn_t smp_reschedule_interrupt(int, void *);

-static DEFINE_PER_CPU(int, spinlock_irq) = -1;
-static char spinlock_name[NR_CPUS][15];
+static int __read_mostly spinlock_irq = -1;

 struct spinning {
 raw_spinlock_t *lock;
@@ -32,34 +31,37 @@ static DEFINE_PER_CPU(raw_rwlock_t, spin

 int __cpuinit xen_spinlock_init(unsigned int cpu)
 {
+ static struct irqaction spinlock_action = {
+ .handler = smp_reschedule_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "spinlock"
+ };
 int rc;

- sprintf(spinlock_name[cpu], "spinlock%u", cpu);
- rc = bind_ipi_to_irqhandler(SPIN_UNLOCK_VECTOR,
- cpu,
- smp_reschedule_interrupt,
- IRQF_DISABLED|IRQF_NOBALANCING,
- spinlock_name[cpu],
- NULL);
+ rc = bind_ipi_to_irqaction(SPIN_UNLOCK_VECTOR,
+ cpu,
+ &spinlock_action);
 if (rc < 0)
 return rc;

- disable_irq(rc); /* make sure it's never delivered */
- per_cpu(spinlock_irq, cpu) = rc;
+ if (spinlock_irq < 0) {
+ disable_irq(rc); /* make sure it's never delivered */
+ spinlock_irq = rc;
+ } else
+ BUG_ON(spinlock_irq != rc);

 return 0;
 }

 void __cpuinit xen_spinlock_cleanup(unsigned int cpu)
 {
- if (per_cpu(spinlock_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(spinlock_irq, cpu), NULL);
- per_cpu(spinlock_irq, cpu) = -1;
+ if (spinlock_irq >= 0)
+ unbind_from_per_cpu_irq(spinlock_irq, cpu);
 }

 int xen_spin_wait(raw_spinlock_t *lock, unsigned int token)
 {
- int rc = 0, irq = __get_cpu_var(spinlock_irq);
+ int rc = 0, irq = spinlock_irq;
 raw_rwlock_t *rm_lock;
 unsigned long flags;
 struct spinning spinning;
@@ -153,7 +155,7 @@ void xen_spin_kick(raw_spinlock_t *lock,
 raw_local_irq_restore(flags);

 if (unlikely(spinning)) {
- notify_remote_via_irq(per_cpu(spinlock_irq, cpu));
+ notify_remote_via_ipi(SPIN_UNLOCK_VECTOR, cpu);
 return;
 }
 }
Index: head-2008-12-01/include/xen/evtchn.h
===================================================================
--- head-2008-12-01.orig/include/xen/evtchn.h 2008-12-01 12:07:30.000000000 +0100
+++ head-2008-12-01/include/xen/evtchn.h 2008-12-01 12:07:34.000000000 +0100
@@ -78,6 +78,8 @@ int bind_virq_to_irqhandler(
 unsigned long irqflags,
 const char *devname,
 void *dev_id);
+#if defined(CONFIG_SMP) && !defined(MODULE)
+#ifndef CONFIG_X86
 int bind_ipi_to_irqhandler(
 unsigned int ipi,
 unsigned int cpu,
@@ -85,6 +87,13 @@ int bind_ipi_to_irqhandler(
 unsigned long irqflags,
 const char *devname,
 void *dev_id);
+#else
+int bind_ipi_to_irqaction(
+ unsigned int ipi,
+ unsigned int cpu,
+ struct irqaction *action);
+#endif
+#endif

 /*
 * Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -93,6 +102,11 @@ int bind_ipi_to_irqhandler(
 */
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);

+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+/* Specialized unbind function for per-CPU IRQs. */
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu);
+#endif
+
 #ifndef CONFIG_XEN
 void irq_resume(void);
 #endif
@@ -184,4 +198,8 @@ int clear_pirq_hw_action(int pirq);
 #define PIRQ_END 5
 #define PIRQ_ACK 6

+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu);
+#endif
+
 #endif /* __ASM_EVTCHN_H__ */