From: jbeulich@novell.com
Subject: fold IPIs onto a single IRQ each
Patch-mainline: obsolete

Fold the per-CPU IPI event channels onto a single IRQ per IPI vector,
instead of allocating one IRQ per vector per CPU: the IRQ is marked
IRQ_PER_CPU (handled via handle_percpu_irq()), while the individual
event channels behind it are tracked in a per-CPU ipi_to_evtchn[]
array. This lets smpboot.c and spinlock.c use one static struct
irqaction per vector in place of the per-CPU IRQ numbers and name
buffers, and teaches fixup_irqs() to leave IRQ_PER_CPU descriptors
alone. Non-x86 and non-SMP configurations keep the previous scheme
(PER_CPU_IPI_IRQ).

Index: head-2008-12-01/arch/x86/kernel/genapic_xen_64.c
===================================================================
--- head-2008-12-01.orig/arch/x86/kernel/genapic_xen_64.c	2008-11-25 13:12:11.000000000 +0100
+++ head-2008-12-01/arch/x86/kernel/genapic_xen_64.c	2008-12-01 12:07:34.000000000 +0100
@@ -25,13 +25,9 @@
 #include <asm/genapic.h>
 #include <xen/evtchn.h>
 
-DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-
 static inline void __send_IPI_one(unsigned int cpu, int vector)
 {
-	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-	BUG_ON(irq < 0);
-	notify_remote_via_irq(irq);
+	notify_remote_via_ipi(vector, cpu);
 }
 
 static void xen_send_IPI_shortcut(unsigned int shortcut,
Index: head-2008-12-01/arch/x86/kernel/ipi-xen.c
===================================================================
--- head-2008-12-01.orig/arch/x86/kernel/ipi-xen.c	2008-11-25 13:12:11.000000000 +0100
+++ head-2008-12-01/arch/x86/kernel/ipi-xen.c	2008-12-01 12:07:34.000000000 +0100
@@ -48,15 +48,6 @@ static inline int __prepare_ICR2(unsigne
 }
 #else
 #include <xen/evtchn.h>
-
-DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-
-static inline void __send_IPI_one(unsigned int cpu, int vector)
-{
-	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-	BUG_ON(irq < 0);
-	notify_remote_via_irq(irq);
-}
 #endif
 
 void __send_IPI_shortcut(unsigned int shortcut, int vector)
@@ -90,12 +81,12 @@ void __send_IPI_shortcut(unsigned int sh
 
 	switch (shortcut) {
 	case APIC_DEST_SELF:
-		__send_IPI_one(smp_processor_id(), vector);
+		notify_remote_via_ipi(vector, smp_processor_id());
 		break;
 	case APIC_DEST_ALLBUT:
 		for_each_online_cpu(cpu)
 			if (cpu != smp_processor_id())
-				__send_IPI_one(cpu, vector);
+				notify_remote_via_ipi(vector, cpu);
 		break;
 	default:
 		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
@@ -165,7 +156,7 @@ void send_IPI_mask_bitmask(const cpumask
 	WARN_ON(!cpus_empty(mask));
 	for_each_online_cpu(cpu)
 		if (cpu_isset(cpu, cpumask))
-			__send_IPI_one(cpu, vector);
+			notify_remote_via_ipi(vector, cpu);
 #endif
 	local_irq_restore(flags);
 }
Index: head-2008-12-01/arch/x86/kernel/irq_32-xen.c
===================================================================
--- head-2008-12-01.orig/arch/x86/kernel/irq_32-xen.c	2008-12-01 11:49:07.000000000 +0100
+++ head-2008-12-01/arch/x86/kernel/irq_32-xen.c	2008-12-01 12:07:34.000000000 +0100
@@ -404,6 +404,9 @@ void fixup_irqs(cpumask_t map)
 		if (irq == 2)
 			continue;
 
+		if (irq_desc[irq].status & IRQ_PER_CPU)
+			continue;
+
 		cpus_and(mask, irq_desc[irq].affinity, map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			/*printk("Breaking affinity for irq %i\n", irq);*/
Index: head-2008-12-01/arch/x86/kernel/irq_64-xen.c
===================================================================
--- head-2008-12-01.orig/arch/x86/kernel/irq_64-xen.c	2008-12-01 11:49:07.000000000 +0100
+++ head-2008-12-01/arch/x86/kernel/irq_64-xen.c	2008-12-01 12:07:34.000000000 +0100
@@ -245,6 +245,7 @@ void fixup_irqs(cpumask_t map)
 		spin_lock(&irq_desc[irq].lock);
 
 		if (!irq_has_action(irq) ||
+		    (irq_desc[irq].status & IRQ_PER_CPU) ||
 		    cpus_equal(irq_desc[irq].affinity, map)) {
 			spin_unlock(&irq_desc[irq].lock);
 			continue;
Index: head-2008-12-01/drivers/xen/Kconfig
===================================================================
--- head-2008-12-01.orig/drivers/xen/Kconfig	2008-10-24 10:52:17.000000000 +0200
+++ head-2008-12-01/drivers/xen/Kconfig	2008-12-01 12:07:34.000000000 +0100
@@ -4,6 +4,7 @@
 
 config XEN
 	bool
+	select IRQ_PER_CPU if SMP
 
 if XEN
 config XEN_INTERFACE_VERSION
@@ -292,6 +293,9 @@ config HAVE_IRQ_IGNORE_UNHANDLED
 config GENERIC_HARDIRQS_NO__DO_IRQ
 	def_bool y
 
+config IRQ_PER_CPU
+	bool
+
 config NO_IDLE_HZ
 	def_bool y
 
Index: head-2008-12-01/drivers/xen/core/evtchn.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/core/evtchn.c	2008-12-02 09:14:14.000000000 +0100
+++ head-2008-12-01/drivers/xen/core/evtchn.c	2008-12-02 09:14:29.000000000 +0100
@@ -57,6 +57,22 @@ static DEFINE_SPINLOCK(irq_mapping_updat
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ... NR_EVENT_CHANNELS-1] = -1 };
 
+/* IRQ <-> IPI mapping. */
+#ifndef NR_IPIS
+#define NR_IPIS 1
+#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_X86)
+static int ipi_to_irq[NR_IPIS] __read_mostly = {[0 ... NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int[NR_IPIS], ipi_to_evtchn) = {[0 ... NR_IPIS-1] = -1};
+#else
+#define PER_CPU_IPI_IRQ
+#endif
+#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ)
+#define BUG_IF_IPI(irq) BUG_ON(type_from_irq(irq) == IRQT_IPI)
+#else
+#define BUG_IF_IPI(irq) ((void)(irq))
+#endif
+
 /* Packed IRQ information: binding type, sub-type index, and event channel. */
 static u32 irq_info[NR_IRQS];
 
@@ -97,10 +113,12 @@ static inline u32 mk_irq_info(u32 type,
  * Accessors for packed IRQ information.
  */
 
+#ifdef PER_CPU_IPI_IRQ
 static inline unsigned int evtchn_from_irq(int irq)
 {
 	return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
 }
+#endif
 
 static inline unsigned int index_from_irq(int irq)
 {
@@ -112,14 +130,28 @@ static inline unsigned int type_from_irq
 	return irq_info[irq] >> (32 - _IRQT_BITS);
 }
 
+#ifndef PER_CPU_IPI_IRQ
+static inline unsigned int evtchn_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
+{
+	BUG_ON(type_from_irq(irq) != IRQT_IPI);
+	return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)];
+}
+
+static inline unsigned int evtchn_from_irq(unsigned int irq)
+{
+	if (type_from_irq(irq) != IRQT_IPI)
+		return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
+	return evtchn_from_per_cpu_irq(irq, smp_processor_id());
+}
+#endif
+
 /* IRQ <-> VIRQ mapping. */
 DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 /* IRQ <-> IPI mapping. */
-#ifndef NR_IPIS
-#define NR_IPIS 1
-#endif
 DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
+#endif
 
 /* Reference counts for bindings to IRQs. */
 static int irq_bindcount[NR_IRQS];
@@ -144,8 +176,14 @@ static void bind_evtchn_to_cpu(unsigned
 
 	BUG_ON(!test_bit(chn, s->evtchn_mask));
 
-	if (irq != -1)
-		irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	if (irq != -1) {
+		struct irq_desc *desc = irq_desc + irq;
+
+		if (!(desc->status & IRQ_PER_CPU))
+			desc->affinity = cpumask_of_cpu(cpu);
+		else
+			cpu_set(cpu, desc->affinity);
+	}
 
 	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
 	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
@@ -439,6 +477,7 @@ static int bind_virq_to_irq(unsigned int
 	return irq;
 }
 
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
 	struct evtchn_bind_ipi bind_ipi;
@@ -470,6 +509,7 @@ static int bind_ipi_to_irq(unsigned int
 	spin_unlock(&irq_mapping_update_lock);
 	return irq;
 }
+#endif
 
 static void unbind_from_irq(unsigned int irq)
 {
@@ -477,6 +517,7 @@ static void unbind_from_irq(unsigned int
 	unsigned int cpu;
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_IPI(irq);
 	spin_lock(&irq_mapping_update_lock);
 
 	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
@@ -490,10 +531,12 @@ static void unbind_from_irq(unsigned int
 			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
 				[index_from_irq(irq)] = -1;
 			break;
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 		case IRQT_IPI:
 			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
 				[index_from_irq(irq)] = -1;
 			break;
+#endif
 		default:
 			break;
 		}
@@ -512,6 +555,46 @@ static void unbind_from_irq(unsigned int
 	spin_unlock(&irq_mapping_update_lock);
 }
 
+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
+{
+	struct evtchn_close close;
+	int evtchn = evtchn_from_per_cpu_irq(irq, cpu);
+
+	spin_lock(&irq_mapping_update_lock);
+
+	if (VALID_EVTCHN(evtchn)) {
+		struct irq_desc *desc = irq_desc + irq;
+
+		mask_evtchn(evtchn);
+
+		BUG_ON(irq_bindcount[irq] <= 1);
+		irq_bindcount[irq]--;
+		cpu_clear(cpu, desc->affinity);
+
+		close.port = evtchn;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
+			BUG();
+
+		switch (type_from_irq(irq)) {
+		case IRQT_IPI:
+			per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)] = -1;
+			break;
+		default:
+			BUG();
+			break;
+		}
+
+		/* Closed ports are implicitly re-bound to VCPU0. */
+		bind_evtchn_to_cpu(evtchn, 0);
+
+		evtchn_to_irq[evtchn] = -1;
+	}
+
+	spin_unlock(&irq_mapping_update_lock);
+}
+#endif /* CONFIG_SMP && !PER_CPU_IPI_IRQ */
+
 int bind_caller_port_to_irqhandler(
 	unsigned int caller_port,
 	irq_handler_t handler,
@@ -606,6 +689,8 @@ int bind_virq_to_irqhandler(
 }
 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
 
+#ifdef CONFIG_SMP
+#ifdef PER_CPU_IPI_IRQ
 int bind_ipi_to_irqhandler(
 	unsigned int ipi,
 	unsigned int cpu,
@@ -628,7 +713,72 @@ int bind_ipi_to_irqhandler(
 
 	return irq;
 }
-EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
+#else
+int __cpuinit bind_ipi_to_irqaction(
+	unsigned int ipi,
+	unsigned int cpu,
+	struct irqaction *action)
+{
+	struct evtchn_bind_ipi bind_ipi;
+	int evtchn, irq, retval = 0;
+
+	spin_lock(&irq_mapping_update_lock);
+
+	if (per_cpu(ipi_to_evtchn, cpu)[ipi] != -1) {
+		spin_unlock(&irq_mapping_update_lock);
+		return -EBUSY;
+	}
+
+	if ((irq = ipi_to_irq[ipi]) == -1) {
+		if ((irq = find_unbound_irq()) < 0) {
+			spin_unlock(&irq_mapping_update_lock);
+			return irq;
+		}
+
+		/* Extra reference so count will never drop to zero. */
+		irq_bindcount[irq]++;
+
+		ipi_to_irq[ipi] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, 0);
+		irq_desc[irq].handle_irq = handle_percpu_irq;
+		retval = 1;
+	}
+
+	bind_ipi.vcpu = cpu;
+	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+					&bind_ipi) != 0)
+		BUG();
+
+	evtchn = bind_ipi.port;
+	evtchn_to_irq[evtchn] = irq;
+	per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+
+	bind_evtchn_to_cpu(evtchn, cpu);
+
+	irq_bindcount[irq]++;
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	if (retval == 0) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		unmask_evtchn(evtchn);
+		local_irq_restore(flags);
+	} else {
+		action->flags |= IRQF_PERCPU;
+		retval = setup_irq(irq, action);
+		if (retval) {
+			unbind_from_per_cpu_irq(irq, cpu);
+			BUG_ON(retval > 0);
+			irq = retval;
+		}
+	}
+
+	return irq;
+}
+#endif /* PER_CPU_IPI_IRQ */
+#endif /* CONFIG_SMP */
 
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
@@ -654,6 +804,7 @@ static void rebind_irq_to_cpu(unsigned i
 {
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_IPI(irq);
 	if (VALID_EVTCHN(evtchn))
 		rebind_evtchn_to_cpu(evtchn, tcpu);
 }
@@ -737,6 +888,7 @@ static struct irq_chip dynirq_chip = {
 	.unmask   = unmask_dynirq,
 	.mask_ack = ack_dynirq,
 	.ack      = ack_dynirq,
+	.eoi      = end_dynirq,
 	.end      = end_dynirq,
 #ifdef CONFIG_SMP
 	.set_affinity = set_affinity_irq,
@@ -909,10 +1061,21 @@ int irq_ignore_unhandled(unsigned int ir
 	return !!(irq_status.flags & XENIRQSTAT_shared);
 }
 
+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
+{
+	int evtchn = evtchn_from_per_cpu_irq(ipi_to_irq[ipi], cpu);
+
+	if (VALID_EVTCHN(evtchn))
+		notify_remote_via_evtchn(evtchn);
+}
+#endif
+
 void notify_remote_via_irq(int irq)
 {
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_IPI(irq);
 	if (VALID_EVTCHN(evtchn))
 		notify_remote_via_evtchn(evtchn);
 }
@@ -920,6 +1083,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq)
 
 int irq_to_evtchn_port(int irq)
 {
+	BUG_IF_IPI(irq);
 	return evtchn_from_irq(irq);
 }
 EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
@@ -1035,11 +1199,16 @@ static void restore_cpu_virqs(unsigned i
 
 static void restore_cpu_ipis(unsigned int cpu)
 {
+#ifdef CONFIG_SMP
 	struct evtchn_bind_ipi bind_ipi;
 	int ipi, irq, evtchn;
 
 	for (ipi = 0; ipi < NR_IPIS; ipi++) {
+#ifdef PER_CPU_IPI_IRQ
 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+#else
+		if ((irq = ipi_to_irq[ipi]) == -1)
+#endif
 			continue;
 
 		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
@@ -1053,13 +1222,17 @@ static void restore_cpu_ipis(unsigned in
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
+#ifdef PER_CPU_IPI_IRQ
 		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+#else
+		per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+#endif
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
 		unmask_evtchn(evtchn);
-
 	}
+#endif
 }
 
 static int evtchn_resume(struct sys_device *dev)
 {
@@ -1103,8 +1276,17 @@ static int evtchn_resume(struct sys_devi
 
 	for_each_possible_cpu(cpu) {
 		restore_cpu_virqs(cpu);
+#ifdef PER_CPU_IPI_IRQ
 		restore_cpu_ipis(cpu);
+#else
+		/* No IPI <-> event-channel mappings. */
+		for (irq = 0; irq < NR_IPIS; ++irq)
+			per_cpu(ipi_to_evtchn, cpu)[irq] = -1;
+#endif
 	}
+#ifndef PER_CPU_IPI_IRQ
+	restore_cpu_ipis(smp_processor_id());
+#endif
 
 	return 0;
 }
Index: head-2008-12-01/drivers/xen/core/smpboot.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/core/smpboot.c	2008-12-01 12:07:15.000000000 +0100
+++ head-2008-12-01/drivers/xen/core/smpboot.c	2008-12-01 12:07:34.000000000 +0100
@@ -53,12 +53,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 #endif
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, call1func_irq);
-static char resched_name[NR_CPUS][15];
-static char callfunc_name[NR_CPUS][15];
-static char call1func_name[NR_CPUS][15];
+static int __read_mostly resched_irq = -1;
+static int __read_mostly callfunc_irq = -1;
+static int __read_mostly call1func_irq = -1;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid))
@@ -117,43 +114,50 @@ remove_siblinginfo(unsigned int cpu)
 
 static int __cpuinit xen_smp_intr_init(unsigned int cpu)
 {
+	static struct irqaction resched_action = {
+		.handler = smp_reschedule_interrupt,
+		.flags = IRQF_DISABLED,
+		.name = "resched"
+	}, callfunc_action = {
+		.handler = smp_call_function_interrupt,
+		.flags = IRQF_DISABLED,
+		.name = "callfunc"
+	}, call1func_action = {
+		.handler = smp_call_function_single_interrupt,
+		.flags = IRQF_DISABLED,
+		.name = "call1func"
+	};
 	int rc;
 
-	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) =
-	per_cpu(call1func_irq, cpu) = -1;
-
-	sprintf(resched_name[cpu], "resched%u", cpu);
-	rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
-				    cpu,
-				    smp_reschedule_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    resched_name[cpu],
-				    NULL);
+	rc = bind_ipi_to_irqaction(RESCHEDULE_VECTOR,
+				   cpu,
+				   &resched_action);
 	if (rc < 0)
 		goto fail;
-	per_cpu(resched_irq, cpu) = rc;
-
-	sprintf(callfunc_name[cpu], "callfunc%u", cpu);
-	rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
-				    cpu,
-				    smp_call_function_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    callfunc_name[cpu],
-				    NULL);
+	if (resched_irq < 0)
+		resched_irq = rc;
+	else
+		BUG_ON(resched_irq != rc);
+
+	rc = bind_ipi_to_irqaction(CALL_FUNCTION_VECTOR,
+				   cpu,
+				   &callfunc_action);
 	if (rc < 0)
 		goto fail;
-	per_cpu(callfunc_irq, cpu) = rc;
-
-	sprintf(call1func_name[cpu], "call1func%u", cpu);
-	rc = bind_ipi_to_irqhandler(CALL_FUNC_SINGLE_VECTOR,
-				    cpu,
-				    smp_call_function_single_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    call1func_name[cpu],
-				    NULL);
+	if (callfunc_irq < 0)
+		callfunc_irq = rc;
+	else
+		BUG_ON(callfunc_irq != rc);
+
+	rc = bind_ipi_to_irqaction(CALL_FUNC_SINGLE_VECTOR,
+				   cpu,
+				   &call1func_action);
 	if (rc < 0)
 		goto fail;
-	per_cpu(call1func_irq, cpu) = rc;
+	if (call1func_irq < 0)
+		call1func_irq = rc;
+	else
+		BUG_ON(call1func_irq != rc);
 
 	rc = xen_spinlock_init(cpu);
 	if (rc < 0)
@@ -165,12 +169,12 @@ static int __cpuinit xen_smp_intr_init(u
 	return 0;
 
  fail:
-	if (per_cpu(resched_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	if (per_cpu(callfunc_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	if (per_cpu(call1func_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
+	if (resched_irq >= 0)
+		unbind_from_per_cpu_irq(resched_irq, cpu);
+	if (callfunc_irq >= 0)
+		unbind_from_per_cpu_irq(callfunc_irq, cpu);
+	if (call1func_irq >= 0)
+		unbind_from_per_cpu_irq(call1func_irq, cpu);
 	xen_spinlock_cleanup(cpu);
 	return rc;
 }
@@ -181,9 +185,9 @@ static void __cpuinit xen_smp_intr_exit(
 	if (cpu != 0)
 		local_teardown_timer(cpu);
 
-	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
+	unbind_from_per_cpu_irq(resched_irq, cpu);
+	unbind_from_per_cpu_irq(callfunc_irq, cpu);
+	unbind_from_per_cpu_irq(call1func_irq, cpu);
 	xen_spinlock_cleanup(cpu);
 }
 #endif
Index: head-2008-12-01/drivers/xen/core/spinlock.c
===================================================================
--- head-2008-12-01.orig/drivers/xen/core/spinlock.c	2008-12-01 11:51:53.000000000 +0100
+++ head-2008-12-01/drivers/xen/core/spinlock.c	2008-12-01 12:07:34.000000000 +0100
@@ -14,8 +14,7 @@
 
 extern irqreturn_t smp_reschedule_interrupt(int, void *);
 
-static DEFINE_PER_CPU(int, spinlock_irq) = -1;
-static char spinlock_name[NR_CPUS][15];
+static int __read_mostly spinlock_irq = -1;
 
 struct spinning {
 	raw_spinlock_t *lock;
@@ -32,34 +31,37 @@ static DEFINE_PER_CPU(raw_rwlock_t, spin
 
 int __cpuinit xen_spinlock_init(unsigned int cpu)
 {
+	static struct irqaction spinlock_action = {
+		.handler = smp_reschedule_interrupt,
+		.flags = IRQF_DISABLED,
+		.name = "spinlock"
+	};
 	int rc;
 
-	sprintf(spinlock_name[cpu], "spinlock%u", cpu);
-	rc = bind_ipi_to_irqhandler(SPIN_UNLOCK_VECTOR,
-				    cpu,
-				    smp_reschedule_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    spinlock_name[cpu],
-				    NULL);
+	rc = bind_ipi_to_irqaction(SPIN_UNLOCK_VECTOR,
+				   cpu,
+				   &spinlock_action);
 	if (rc < 0)
 		return rc;
 
-	disable_irq(rc); /* make sure it's never delivered */
-	per_cpu(spinlock_irq, cpu) = rc;
+	if (spinlock_irq < 0) {
+		disable_irq(rc); /* make sure it's never delivered */
+		spinlock_irq = rc;
+	} else
+		BUG_ON(spinlock_irq != rc);
 
 	return 0;
 }
 
 void __cpuinit xen_spinlock_cleanup(unsigned int cpu)
 {
-	if (per_cpu(spinlock_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(spinlock_irq, cpu), NULL);
-	per_cpu(spinlock_irq, cpu) = -1;
+	if (spinlock_irq >= 0)
+		unbind_from_per_cpu_irq(spinlock_irq, cpu);
 }
 
 int xen_spin_wait(raw_spinlock_t *lock, unsigned int token)
 {
-	int rc = 0, irq = __get_cpu_var(spinlock_irq);
+	int rc = 0, irq = spinlock_irq;
 	raw_rwlock_t *rm_lock;
 	unsigned long flags;
 	struct spinning spinning;
@@ -153,7 +155,7 @@ void xen_spin_kick(raw_spinlock_t *lock,
 	raw_local_irq_restore(flags);
 
 	if (unlikely(spinning)) {
-		notify_remote_via_irq(per_cpu(spinlock_irq, cpu));
+		notify_remote_via_ipi(SPIN_UNLOCK_VECTOR, cpu);
 		return;
 	}
 }
Index: head-2008-12-01/include/xen/evtchn.h
===================================================================
--- head-2008-12-01.orig/include/xen/evtchn.h	2008-12-01 12:07:30.000000000 +0100
+++ head-2008-12-01/include/xen/evtchn.h	2008-12-01 12:07:34.000000000 +0100
@@ -78,6 +78,8 @@ int bind_virq_to_irqhandler(
 	unsigned long irqflags,
 	const char *devname,
 	void *dev_id);
+#if defined(CONFIG_SMP) && !defined(MODULE)
+#ifndef CONFIG_X86
 int bind_ipi_to_irqhandler(
 	unsigned int ipi,
 	unsigned int cpu,
@@ -85,6 +87,13 @@ int bind_ipi_to_irqhandler(
 	unsigned long irqflags,
 	const char *devname,
 	void *dev_id);
+#else
+int bind_ipi_to_irqaction(
+	unsigned int ipi,
+	unsigned int cpu,
+	struct irqaction *action);
+#endif
+#endif
 
 /*
  * Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -93,6 +102,11 @@ int bind_ipi_to_irqhandler(
  */
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 
+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+/* Specialized unbind function for per-CPU IRQs. */
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu);
+#endif
+
 #ifndef CONFIG_XEN
 void irq_resume(void);
 #endif
@@ -184,4 +198,8 @@ int clear_pirq_hw_action(int pirq);
 #define PIRQ_END 5
 #define PIRQ_ACK 6
 
+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu);
+#endif
+
 #endif /* __ASM_EVTCHN_H__ */
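
Usage sketch (illustration only, not part of the patch): the pattern the new
interface expects is the one the smpboot.c hunks above follow — one static
struct irqaction shared by all CPUs, bound once per CPU. MY_VECTOR and the
my_* names below are hypothetical placeholders standing in for a real IPI
vector (e.g. RESCHEDULE_VECTOR) and its handler.

	/* Illustrative sketch -- not part of the patch. */
	#include <linux/interrupt.h>
	#include <xen/evtchn.h>

	static irqreturn_t my_ipi_interrupt(int irq, void *dev_id)
	{
		/* Runs on whichever CPU's event channel fired. */
		return IRQ_HANDLED;
	}

	/* One irqaction for all CPUs; bind_ipi_to_irqaction() marks the
	 * IRQ IRQ_PER_CPU and installs handle_percpu_irq on first use. */
	static struct irqaction my_ipi_action = {
		.handler = my_ipi_interrupt,
		.flags = IRQF_DISABLED,
		.name = "my_ipi"
	};

	static int __cpuinit my_cpu_up(unsigned int cpu)
	{
		/* The first call allocates the single IRQ and sets up the
		 * action; calls for further CPUs only bind a new event
		 * channel and return the same IRQ number. */
		int irq = bind_ipi_to_irqaction(MY_VECTOR, cpu, &my_ipi_action);

		return irq < 0 ? irq : 0;
	}

	static void my_cpu_down(int irq, unsigned int cpu)
	{
		/* Closes only this CPU's event channel; the IRQ itself
		 * stays allocated (it holds an extra bind count). */
		unbind_from_per_cpu_irq(irq, cpu);
	}

	static void my_kick(unsigned int cpu)
	{
		/* Raise the IPI on the given CPU. */
		notify_remote_via_ipi(MY_VECTOR, cpu);
	}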