From: jbeulich@novell.com
Subject: fold per-CPU VIRQs onto a single IRQ each
Patch-mainline: obsolete

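Rather than binding one IRQ (and one event channel) per CPU for each
per-CPU VIRQ, fold all per-CPU instances of such a VIRQ onto a single
IRQ backed by a shared struct irqaction, and track the underlying
per-CPU event channels in a new virq_to_evtchn per-CPU array. Users of
the per-CPU VIRQs (VIRQ_TIMER, VIRQ_DEBUG, VIRQ_XENOPROF, plus VIRQ_ITC
on ia64) are converted from bind_virq_to_irqhandler() /
unbind_from_irqhandler() to the new bind_virq_to_irqaction() and the
extended unbind_from_per_cpu_irq().

For illustration only (not part of the patch), a caller conversion
under the new interface looks roughly as follows, mirroring the
xenoprof changes below; my_handler, my_action and my_irq are made-up
names:

	static irqreturn_t my_handler(int irq, void *dev_id);

	static struct irqaction my_action = {
		.handler = my_handler,
		.flags = IRQF_DISABLED,	/* IRQF_NOBALANCING no longer needs passing */
		.name = "my-virq"
	};

	/* Bind on each CPU; every call returns the same shared IRQ. */
	int my_irq = bind_virq_to_irqaction(VIRQ_XENOPROF, cpu, &my_action);

	/* Tear down one CPU's binding; the irqaction identifies it. */
	unbind_from_per_cpu_irq(my_irq, cpu, &my_action);
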
Index: head-2008-12-01/arch/x86/kernel/time_32-xen.c
===================================================================
--- head-2008-12-01.orig/arch/x86/kernel/time_32-xen.c	2008-12-01 12:07:30.000000000 +0100
+++ head-2008-12-01/arch/x86/kernel/time_32-xen.c	2008-12-01 12:08:40.000000000 +0100
@@ -729,19 +729,17 @@ int xen_update_persistent_clock(void)
 }
 
 /* Dynamically-mapped IRQ. */
-DEFINE_PER_CPU(int, timer_irq);
+static int __read_mostly timer_irq = -1;
+static struct irqaction timer_action = {
+	.handler = timer_interrupt,
+	.flags = IRQF_DISABLED,
+	.name = "timer"
+};
 
 static void __init setup_cpu0_timer_irq(void)
 {
-	per_cpu(timer_irq, 0) =
-		bind_virq_to_irqhandler(
-			VIRQ_TIMER,
-			0,
-			timer_interrupt,
-			IRQF_DISABLED|IRQF_NOBALANCING,
-			"timer0",
-			NULL);
-	BUG_ON(per_cpu(timer_irq, 0) < 0);
+	timer_irq = bind_virq_to_irqaction(VIRQ_TIMER, 0, &timer_action);
+	BUG_ON(timer_irq < 0);
 }
 
 void __init time_init(void)
@@ -868,8 +866,6 @@ void xen_halt(void)
 EXPORT_SYMBOL(xen_halt);
 
 #ifdef CONFIG_SMP
-static char timer_name[NR_CPUS][15];
-
 int __cpuinit local_setup_timer(unsigned int cpu)
 {
 	int seq, irq;
@@ -895,16 +891,10 @@ int __cpuinit local_setup_timer(unsigned
 		init_missing_ticks_accounting(cpu);
 	} while (read_seqretry(&xtime_lock, seq));
 
-	sprintf(timer_name[cpu], "timer%u", cpu);
-	irq = bind_virq_to_irqhandler(VIRQ_TIMER,
-				      cpu,
-				      timer_interrupt,
-				      IRQF_DISABLED|IRQF_NOBALANCING,
-				      timer_name[cpu],
-				      NULL);
+	irq = bind_virq_to_irqaction(VIRQ_TIMER, cpu, &timer_action);
 	if (irq < 0)
 		return irq;
-	per_cpu(timer_irq, cpu) = irq;
+	BUG_ON(timer_irq != irq);
 
 	return 0;
 }
@@ -912,7 +902,7 @@ int __cpuinit local_setup_timer(unsigned
 void __cpuinit local_teardown_timer(unsigned int cpu)
 {
 	BUG_ON(cpu == 0);
-	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
+	unbind_from_per_cpu_irq(timer_irq, cpu, &timer_action);
 }
 #endif
 
Index: head-2008-12-01/drivers/xen/core/evtchn.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/core/evtchn.c	2008-12-02 09:14:29.000000000 +0100
+++ head-2008-12-01/drivers/xen/core/evtchn.c	2008-12-03 15:54:25.000000000 +0100
@@ -57,6 +57,23 @@ static DEFINE_SPINLOCK(irq_mapping_updat
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ... NR_EVENT_CHANNELS-1] = -1 };
 
+#if defined(CONFIG_SMP) && defined(CONFIG_X86)
+static struct per_cpu_irqaction {
+	struct irqaction action; /* must be first */
+	struct per_cpu_irqaction *next;
+	cpumask_t cpus;
+} *virq_actions[NR_VIRQS];
+/* IRQ <-> VIRQ mapping. */
+static DECLARE_BITMAP(virq_per_cpu, NR_VIRQS) __read_mostly;
+static DEFINE_PER_CPU(int[NR_VIRQS], virq_to_evtchn) = {[0 ... NR_VIRQS-1] = -1};
+#define BUG_IF_VIRQ_PER_CPU(irq) \
+	BUG_ON(type_from_irq(irq) == IRQT_VIRQ \
+	       && test_bit(index_from_irq(irq), virq_per_cpu))
+#else
+#define BUG_IF_VIRQ_PER_CPU(irq) ((void)(irq))
+#define PER_CPU_VIRQ_IRQ
+#endif
+
 /* IRQ <-> IPI mapping. */
 #ifndef NR_IPIS
 #define NR_IPIS 1
@@ -113,13 +130,6 @@ static inline u32 mk_irq_info(u32 type,
  * Accessors for packed IRQ information.
  */
 
-#ifdef PER_CPU_IPI_IRQ
-static inline unsigned int evtchn_from_irq(int irq)
-{
-	return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
-}
-#endif
-
 static inline unsigned int index_from_irq(int irq)
 {
 	return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
@@ -130,20 +140,34 @@ static inline unsigned int type_from_irq
 	return irq_info[irq] >> (32 - _IRQT_BITS);
 }
 
-#ifndef PER_CPU_IPI_IRQ
 static inline unsigned int evtchn_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
 {
-	BUG_ON(type_from_irq(irq) != IRQT_IPI);
-	return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)];
+	switch (type_from_irq(irq)) {
+#ifndef PER_CPU_VIRQ_IRQ
+	case IRQT_VIRQ:
+		return per_cpu(virq_to_evtchn, cpu)[index_from_irq(irq)];
+#endif
+#ifndef PER_CPU_IPI_IRQ
+	case IRQT_IPI:
+		return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)];
+#endif
+	}
+	BUG();
 }
 
 static inline unsigned int evtchn_from_irq(unsigned int irq)
 {
-	if (type_from_irq(irq) != IRQT_IPI)
-		return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
-	return evtchn_from_per_cpu_irq(irq, smp_processor_id());
-}
+	switch (type_from_irq(irq)) {
+#ifndef PER_CPU_VIRQ_IRQ
+	case IRQT_VIRQ:
+#endif
+#ifndef PER_CPU_IPI_IRQ
+	case IRQT_IPI:
 #endif
+		return evtchn_from_per_cpu_irq(irq, smp_processor_id());
+	}
+	return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
+}
 
 /* IRQ <-> VIRQ mapping. */
 DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
@@ -463,6 +487,14 @@ static int bind_virq_to_irq(unsigned int
 		evtchn = bind_virq.port;
 
 		evtchn_to_irq[evtchn] = irq;
+#ifndef PER_CPU_VIRQ_IRQ
+		{
+			unsigned int cpu;
+
+			for_each_possible_cpu(cpu)
+				per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
+		}
+#endif
 		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
 
 		per_cpu(virq_to_irq, cpu)[virq] = irq;
@@ -517,7 +549,9 @@ static void unbind_from_irq(unsigned int
 	unsigned int cpu;
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_VIRQ_PER_CPU(irq);
 	BUG_IF_IPI(irq);
+
 	spin_lock(&irq_mapping_update_lock);
 
 	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
@@ -530,6 +564,11 @@ static void unbind_from_irq(unsigned int
 		case IRQT_VIRQ:
 			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
 				[index_from_irq(irq)] = -1;
+#ifndef PER_CPU_VIRQ_IRQ
+			for_each_possible_cpu(cpu)
+				per_cpu(virq_to_evtchn, cpu)
+					[index_from_irq(irq)] = -1;
+#endif
 			break;
 #if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 		case IRQT_IPI:
@@ -555,11 +594,13 @@ static void unbind_from_irq(unsigned int
 	spin_unlock(&irq_mapping_update_lock);
 }
 
-#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
-void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
+#if defined(CONFIG_SMP) && (!defined(PER_CPU_IPI_IRQ) || !defined(PER_CPU_VIRQ_IRQ))
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu,
+			     struct irqaction *action)
 {
 	struct evtchn_close close;
 	int evtchn = evtchn_from_per_cpu_irq(irq, cpu);
+	struct irqaction *free_action = NULL;
 
 	spin_lock(&irq_mapping_update_lock);
 
@@ -570,6 +611,32 @@ void unbind_from_per_cpu_irq(unsigned in
 
 	BUG_ON(irq_bindcount[irq] <= 1);
 	irq_bindcount[irq]--;
+
+#ifndef PER_CPU_VIRQ_IRQ
+	if (type_from_irq(irq) == IRQT_VIRQ) {
+		unsigned int virq = index_from_irq(irq);
+		struct per_cpu_irqaction *cur, *prev = NULL;
+
+		cur = virq_actions[virq];
+		while (cur) {
+			if (cur->action.dev_id == action) {
+				cpu_clear(cpu, cur->cpus);
+				if (cpus_empty(cur->cpus)) {
+					if (prev)
+						prev->next = cur->next;
+					else
+						virq_actions[virq] = cur->next;
+					free_action = action;
+				}
+			} else if (cpu_isset(cpu, cur->cpus))
+				evtchn = 0;
+			cur = (prev = cur)->next;
+		}
+		if (!VALID_EVTCHN(evtchn))
+			goto done;
+	}
+#endif
+
 	cpu_clear(cpu, desc->affinity);
 
 	close.port = evtchn;
@@ -577,9 +644,16 @@ void unbind_from_per_cpu_irq(unsigned in
 		BUG();
 
 	switch (type_from_irq(irq)) {
+#ifndef PER_CPU_VIRQ_IRQ
+	case IRQT_VIRQ:
+		per_cpu(virq_to_evtchn, cpu)[index_from_irq(irq)] = -1;
+		break;
+#endif
+#ifndef PER_CPU_IPI_IRQ
 	case IRQT_IPI:
 		per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)] = -1;
 		break;
+#endif
 	default:
 		BUG();
 		break;
@@ -591,9 +665,16 @@ void unbind_from_per_cpu_irq(unsigned in
 		evtchn_to_irq[evtchn] = -1;
 	}
 
+#ifndef PER_CPU_VIRQ_IRQ
+done:
+#endif
 	spin_unlock(&irq_mapping_update_lock);
+
+	if (free_action)
+		free_irq(irq, free_action);
 }
-#endif /* CONFIG_SMP && !PER_CPU_IPI_IRQ */
+EXPORT_SYMBOL_GPL(unbind_from_per_cpu_irq);
+#endif /* CONFIG_SMP && (!PER_CPU_IPI_IRQ || !PER_CPU_VIRQ_IRQ) */
 
 int bind_caller_port_to_irqhandler(
 	unsigned int caller_port,
@@ -675,6 +756,8 @@ int bind_virq_to_irqhandler(
 {
 	int irq, retval;
 
+	BUG_IF_VIRQ_PER_CPU(virq);
+
 	irq = bind_virq_to_irq(virq, cpu);
 	if (irq < 0)
 		return irq;
@@ -690,6 +773,108 @@ int bind_virq_to_irqhandler(
 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
 
 #ifdef CONFIG_SMP
+#ifndef PER_CPU_VIRQ_IRQ
+int bind_virq_to_irqaction(
+	unsigned int virq,
+	unsigned int cpu,
+	struct irqaction *action)
+{
+	struct evtchn_bind_virq bind_virq;
+	int evtchn, irq, retval = 0;
+	struct per_cpu_irqaction *cur = NULL, *new;
+
+	BUG_ON(!test_bit(virq, virq_per_cpu));
+
+	if (action->dev_id)
+		return -EINVAL;
+
+	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+	if (new) {
+		new->action = *action;
+		new->action.dev_id = action;
+	}
+
+	spin_lock(&irq_mapping_update_lock);
+
+	for (cur = virq_actions[virq]; cur; cur = cur->next)
+		if (cur->action.dev_id == action)
+			break;
+	if (!cur) {
+		if (!new) {
+			spin_unlock(&irq_mapping_update_lock);
+			return -ENOMEM;
+		}
+		new->next = virq_actions[virq];
+		virq_actions[virq] = cur = new;
+		retval = 1;
+	}
+	cpu_set(cpu, cur->cpus);
+	action = &cur->action;
+
+	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
+		unsigned int cpu;
+
+		BUG_ON(!retval);
+
+		if ((irq = find_unbound_irq()) < 0) {
+			if (cur)
+				virq_actions[virq] = cur->next;
+			spin_unlock(&irq_mapping_update_lock);
+			if (cur != new)
+				kfree(new);
+			return irq;
+		}
+
+		/* Extra reference so count will never drop to zero. */
+		irq_bindcount[irq]++;
+
+		for_each_possible_cpu(cpu)
+			per_cpu(virq_to_irq, cpu)[virq] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, 0);
+		irq_desc[irq].handle_irq = handle_percpu_irq;
+	}
+
+	if ((evtchn = per_cpu(virq_to_evtchn, cpu)[virq]) == -1) {
+		bind_virq.virq = virq;
+		bind_virq.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+						&bind_virq) != 0)
+			BUG();
+		evtchn = bind_virq.port;
+		evtchn_to_irq[evtchn] = irq;
+		per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
+
+		bind_evtchn_to_cpu(evtchn, cpu);
+	}
+
+	irq_bindcount[irq]++;
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	if (cur != new)
+		kfree(new);
+
+	if (retval == 0) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		unmask_evtchn(evtchn);
+		local_irq_restore(flags);
+	} else {
+		action->flags |= IRQF_PERCPU;
+		retval = setup_irq(irq, action);
+		if (retval) {
+			unbind_from_per_cpu_irq(irq, cpu, cur->action.dev_id);
+			BUG_ON(retval > 0);
+			irq = retval;
+		}
+	}
+
+	return irq;
+}
+EXPORT_SYMBOL_GPL(bind_virq_to_irqaction);
+#endif
+
 #ifdef PER_CPU_IPI_IRQ
 int bind_ipi_to_irqhandler(
 	unsigned int ipi,
@@ -769,7 +954,7 @@ int __cpuinit bind_ipi_to_irqaction(
 	action->flags |= IRQF_PERCPU;
 	retval = setup_irq(irq, action);
 	if (retval) {
-		unbind_from_per_cpu_irq(irq, cpu);
+		unbind_from_per_cpu_irq(irq, cpu, NULL);
 		BUG_ON(retval > 0);
 		irq = retval;
 	}
@@ -804,7 +989,9 @@ static void rebind_irq_to_cpu(unsigned i
 {
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_VIRQ_PER_CPU(irq);
 	BUG_IF_IPI(irq);
+
 	if (VALID_EVTCHN(evtchn))
 		rebind_evtchn_to_cpu(evtchn, tcpu);
 }
@@ -1075,7 +1262,9 @@ void notify_remote_via_irq(int irq)
 {
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_ON(type_from_irq(irq) == IRQT_VIRQ);
 	BUG_IF_IPI(irq);
+
 	if (VALID_EVTCHN(evtchn))
 		notify_remote_via_evtchn(evtchn);
 }
@@ -1083,6 +1272,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq)
 
 int irq_to_evtchn_port(int irq)
 {
+	BUG_IF_VIRQ_PER_CPU(irq);
 	BUG_IF_IPI(irq);
 	return evtchn_from_irq(irq);
 }
@@ -1177,6 +1367,20 @@ static void restore_cpu_virqs(unsigned i
 		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
 			continue;
 
+#ifndef PER_CPU_VIRQ_IRQ
+		if (test_bit(virq, virq_per_cpu)) {
+			const struct per_cpu_irqaction *cur;
+
+			if (cpu != smp_processor_id())
+				continue;
+			for (cur = virq_actions[virq]; cur; cur = cur->next)
+				if (cpu_isset(cpu, cur->cpus))
+					break;
+			if (!cur)
+				continue;
+		}
+#endif
+
 		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
 
 		/* Get a new binding from Xen. */
@@ -1189,7 +1393,19 @@ static void restore_cpu_virqs(unsigned i
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
+#ifndef PER_CPU_VIRQ_IRQ
+		if (test_bit(virq, virq_per_cpu))
+			per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
+		else {
+			unsigned int cpu;
+
+			irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+			for_each_possible_cpu(cpu)
+				per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
+		}
+#else
 		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+#endif
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
@@ -1242,7 +1458,11 @@ static int evtchn_resume(struct sys_devi
 
 	/* Avoid doing anything in the 'suspend cancelled' case. */
 	status.dom = DOMID_SELF;
+#ifdef PER_CPU_VIRQ_IRQ
 	status.port = evtchn_from_irq(__get_cpu_var(virq_to_irq)[VIRQ_TIMER]);
+#else
+	status.port = __get_cpu_var(virq_to_evtchn)[VIRQ_TIMER];
+#endif
 	if (HYPERVISOR_event_channel_op(EVTCHNOP_status, &status))
 		BUG();
 	if (status.status == EVTCHNSTAT_virq
@@ -1391,6 +1611,15 @@ void __init xen_init_IRQ(void)
 	unsigned int i;
 	struct physdev_pirq_eoi_mfn eoi_mfn;
 
+#ifndef PER_CPU_VIRQ_IRQ
+	__set_bit(VIRQ_TIMER, virq_per_cpu);
+	__set_bit(VIRQ_DEBUG, virq_per_cpu);
+	__set_bit(VIRQ_XENOPROF, virq_per_cpu);
+#ifdef CONFIG_IA64
+	__set_bit(VIRQ_ITC, virq_per_cpu);
+#endif
+#endif
+
 	init_evtchn_cpu_bindings();
 
 	BUG_ON(!bitmap_empty(pirq_needs_eoi, PAGE_SIZE * 8));
Index: head-2008-12-01/drivers/xen/core/smpboot.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/core/smpboot.c	2008-12-01 12:07:34.000000000 +0100
+++ head-2008-12-01/drivers/xen/core/smpboot.c	2008-12-01 12:08:40.000000000 +0100
@@ -170,11 +170,11 @@ static int __cpuinit xen_smp_intr_init(u
 
 fail:
 	if (resched_irq >= 0)
-		unbind_from_per_cpu_irq(resched_irq, cpu);
+		unbind_from_per_cpu_irq(resched_irq, cpu, NULL);
 	if (callfunc_irq >= 0)
-		unbind_from_per_cpu_irq(callfunc_irq, cpu);
+		unbind_from_per_cpu_irq(callfunc_irq, cpu, NULL);
 	if (call1func_irq >= 0)
-		unbind_from_per_cpu_irq(call1func_irq, cpu);
+		unbind_from_per_cpu_irq(call1func_irq, cpu, NULL);
 	xen_spinlock_cleanup(cpu);
 	return rc;
 }
@@ -185,9 +185,9 @@ static void __cpuinit xen_smp_intr_exit(
 	if (cpu != 0)
 		local_teardown_timer(cpu);
 
-	unbind_from_per_cpu_irq(resched_irq, cpu);
-	unbind_from_per_cpu_irq(callfunc_irq, cpu);
-	unbind_from_per_cpu_irq(call1func_irq, cpu);
+	unbind_from_per_cpu_irq(resched_irq, cpu, NULL);
+	unbind_from_per_cpu_irq(callfunc_irq, cpu, NULL);
+	unbind_from_per_cpu_irq(call1func_irq, cpu, NULL);
 	xen_spinlock_cleanup(cpu);
 }
 #endif
Index: head-2008-12-01/drivers/xen/core/spinlock.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/core/spinlock.c	2008-12-01 12:07:34.000000000 +0100
+++ head-2008-12-01/drivers/xen/core/spinlock.c	2008-12-01 12:08:40.000000000 +0100
@@ -56,7 +56,7 @@ int __cpuinit xen_spinlock_init(unsigned
 void __cpuinit xen_spinlock_cleanup(unsigned int cpu)
 {
 	if (spinlock_irq >= 0)
-		unbind_from_per_cpu_irq(spinlock_irq, cpu);
+		unbind_from_per_cpu_irq(spinlock_irq, cpu, NULL);
 }
 
 int xen_spin_wait(raw_spinlock_t *lock, unsigned int token)
Index: head-2008-12-01/drivers/xen/netback/netback.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/netback/netback.c	2008-12-01 11:36:55.000000000 +0100
+++ head-2008-12-01/drivers/xen/netback/netback.c	2008-12-01 12:08:40.000000000 +0100
@@ -1543,6 +1543,12 @@ static irqreturn_t netif_be_dbg(int irq,
 
 	return IRQ_HANDLED;
 }
+
+static struct irqaction netif_be_dbg_action = {
+	.handler = netif_be_dbg,
+	.flags = IRQF_SHARED,
+	.name = "net-be-dbg"
+};
 #endif
 
 static int __init netback_init(void)
@@ -1602,12 +1608,9 @@ static int __init netback_init(void)
 	netif_xenbus_init();
 
 #ifdef NETBE_DEBUG_INTERRUPT
-	(void)bind_virq_to_irqhandler(VIRQ_DEBUG,
-				      0,
-				      netif_be_dbg,
-				      IRQF_SHARED,
-				      "net-be-dbg",
-				      &netif_be_dbg);
+	(void)bind_virq_to_irqaction(VIRQ_DEBUG,
+				     0,
+				     &netif_be_dbg_action);
 #endif
 
 	return 0;
Index: head-2008-12-01/drivers/xen/xenoprof/xenoprofile.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/xenoprof/xenoprofile.c	2008-12-01 11:37:10.000000000 +0100
+++ head-2008-12-01/drivers/xen/xenoprof/xenoprofile.c	2008-12-01 12:08:40.000000000 +0100
@@ -212,6 +212,11 @@ static irqreturn_t xenoprof_ovf_interrup
 	return IRQ_HANDLED;
 }
 
+static struct irqaction ovf_action = {
+	.handler = xenoprof_ovf_interrupt,
+	.flags = IRQF_DISABLED,
+	.name = "xenoprof"
+};
 
 static void unbind_virq(void)
 {
@@ -219,7 +224,7 @@ static void unbind_virq(void)
 
 	for_each_online_cpu(i) {
 		if (ovf_irq[i] >= 0) {
-			unbind_from_irqhandler(ovf_irq[i], NULL);
+			unbind_from_per_cpu_irq(ovf_irq[i], i, &ovf_action);
 			ovf_irq[i] = -1;
 		}
 	}
@@ -232,12 +237,7 @@ static int bind_virq(void)
 	int result;
 
 	for_each_online_cpu(i) {
-		result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
-						 i,
-						 xenoprof_ovf_interrupt,
-						 IRQF_DISABLED|IRQF_NOBALANCING,
-						 "xenoprof",
-						 NULL);
+		result = bind_virq_to_irqaction(VIRQ_XENOPROF, i, &ovf_action);
 
 		if (result < 0) {
 			unbind_virq();
Index: head-2008-12-01/include/xen/evtchn.h
===================================================================
--- head-2008-12-01.orig/include/xen/evtchn.h	2008-12-01 12:07:34.000000000 +0100
+++ head-2008-12-01/include/xen/evtchn.h	2008-12-01 12:08:40.000000000 +0100
@@ -78,6 +78,17 @@ int bind_virq_to_irqhandler(
 	unsigned long irqflags,
 	const char *devname,
 	void *dev_id);
+#if defined(CONFIG_SMP) && defined(CONFIG_XEN) && defined(CONFIG_X86)
+int bind_virq_to_irqaction(
+	unsigned int virq,
+	unsigned int cpu,
+	struct irqaction *action);
+#else
+#define bind_virq_to_irqaction(virq, cpu, action) \
+	bind_virq_to_irqhandler(virq, cpu, (action)->handler, \
+				(action)->flags | IRQF_NOBALANCING, \
+				(action)->name, action)
+#endif
 #if defined(CONFIG_SMP) && !defined(MODULE)
 #ifndef CONFIG_X86
 int bind_ipi_to_irqhandler(
@@ -102,9 +113,13 @@ int bind_ipi_to_irqaction(
  */
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 
-#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+#if defined(CONFIG_SMP) && defined(CONFIG_XEN) && defined(CONFIG_X86)
 /* Specialized unbind function for per-CPU IRQs. */
-void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu);
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu,
+			     struct irqaction *);
+#else
+#define unbind_from_per_cpu_irq(irq, cpu, action) \
+	unbind_from_irqhandler(irq, action)
+#endif
 #endif
 
 #ifndef CONFIG_XEN