Imported linux-2.6.27.39 suse/xen patches.
diff --git a/src/patches/suse-2.6.27.31/patches.xen/xen-ipi-per-cpu-irq b/src/patches/suse-2.6.27.31/patches.xen/xen-ipi-per-cpu-irq
deleted file mode 100644
index d950a1f..0000000
--- a/src/patches/suse-2.6.27.31/patches.xen/xen-ipi-per-cpu-irq
+++ /dev/null
@@ -1,778 +0,0 @@
-From: jbeulich@novell.com
-Subject: fold IPIs onto a single IRQ each
-Patch-mainline: obsolete
-
---- sle11-2009-06-04.orig/arch/x86/kernel/genapic_xen_64.c     2009-06-04 10:46:34.000000000 +0200
-+++ sle11-2009-06-04/arch/x86/kernel/genapic_xen_64.c  2009-06-04 10:47:21.000000000 +0200
-@@ -25,13 +25,13 @@
- #include <asm/genapic.h>
- #include <xen/evtchn.h>
--DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
--
- static inline void __send_IPI_one(unsigned int cpu, int vector)
- {
--      int irq = per_cpu(ipi_to_irq, cpu)[vector];
--      BUG_ON(irq < 0);
--      notify_remote_via_irq(irq);
-+#ifdef CONFIG_SMP
-+      notify_remote_via_ipi(vector, cpu);
-+#else
-+      BUG();
-+#endif
- }
- static void xen_send_IPI_shortcut(unsigned int shortcut,
---- sle11-2009-06-04.orig/arch/x86/kernel/ipi-xen.c    2009-06-04 10:46:34.000000000 +0200
-+++ sle11-2009-06-04/arch/x86/kernel/ipi-xen.c 2009-06-04 10:47:21.000000000 +0200
-@@ -48,15 +48,6 @@ static inline int __prepare_ICR2(unsigne
- }
- #else
- #include <xen/evtchn.h>
--
--DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
--
--static inline void __send_IPI_one(unsigned int cpu, int vector)
--{
--      int irq = per_cpu(ipi_to_irq, cpu)[vector];
--      BUG_ON(irq < 0);
--      notify_remote_via_irq(irq);
--}
- #endif
- void __send_IPI_shortcut(unsigned int shortcut, int vector)
-@@ -90,12 +81,12 @@ void __send_IPI_shortcut(unsigned int sh
-       switch (shortcut) {
-       case APIC_DEST_SELF:
--              __send_IPI_one(smp_processor_id(), vector);
-+              notify_remote_via_ipi(vector, smp_processor_id());
-               break;
-       case APIC_DEST_ALLBUT:
-               for_each_online_cpu(cpu)
-                       if (cpu != smp_processor_id())
--                              __send_IPI_one(cpu, vector);
-+                              notify_remote_via_ipi(vector, cpu);
-               break;
-       default:
-               printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
-@@ -165,7 +156,7 @@ void send_IPI_mask_bitmask(const cpumask
-       WARN_ON(!cpus_subset(*cpumask, cpu_online_map));
-       for_each_online_cpu(cpu)
-               if (cpu_isset(cpu, *cpumask))
--                      __send_IPI_one(cpu, vector);
-+                      notify_remote_via_ipi(vector, cpu);
- #endif
-       local_irq_restore(flags);
- }
---- sle11-2009-06-04.orig/arch/x86/kernel/irq_32-xen.c 2009-06-04 10:21:39.000000000 +0200
-+++ sle11-2009-06-04/arch/x86/kernel/irq_32-xen.c      2009-06-04 10:47:21.000000000 +0200
-@@ -404,6 +404,9 @@ void fixup_irqs(cpumask_t map)
-               if (irq == 2)
-                       continue;
-+              if (irq_desc[irq].status & IRQ_PER_CPU)
-+                      continue;
-+
-               cpus_and(mask, irq_desc[irq].affinity, map);
-               if (any_online_cpu(mask) == NR_CPUS) {
-                       /*printk("Breaking affinity for irq %i\n", irq);*/
---- sle11-2009-06-04.orig/arch/x86/kernel/irq_64-xen.c 2009-06-04 10:21:39.000000000 +0200
-+++ sle11-2009-06-04/arch/x86/kernel/irq_64-xen.c      2009-06-04 10:47:21.000000000 +0200
-@@ -245,6 +245,7 @@ void fixup_irqs(cpumask_t map)
-               spin_lock(&irq_desc[irq].lock);
-               if (!irq_has_action(irq) ||
-+                  (irq_desc[irq].status & IRQ_PER_CPU) ||
-                   cpus_equal(irq_desc[irq].affinity, map)) {
-                       spin_unlock(&irq_desc[irq].lock);
-                       continue;
---- sle11-2009-06-04.orig/drivers/xen/Kconfig  2009-06-04 10:47:17.000000000 +0200
-+++ sle11-2009-06-04/drivers/xen/Kconfig       2009-06-04 10:47:21.000000000 +0200
-@@ -4,6 +4,7 @@
- config XEN
-       bool
-+      select IRQ_PER_CPU if SMP
- if XEN
- config XEN_INTERFACE_VERSION
-@@ -292,6 +293,9 @@ config HAVE_IRQ_IGNORE_UNHANDLED
- config GENERIC_HARDIRQS_NO__DO_IRQ
-       def_bool y
-+config IRQ_PER_CPU
-+      bool
-+
- config NO_IDLE_HZ
-       def_bool y
---- sle11-2009-06-04.orig/drivers/xen/core/evtchn.c    2009-06-04 10:47:20.000000000 +0200
-+++ sle11-2009-06-04/drivers/xen/core/evtchn.c 2009-06-04 10:47:21.000000000 +0200
-@@ -58,6 +58,22 @@ static DEFINE_SPINLOCK(irq_mapping_updat
- static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
-       [0 ...  NR_EVENT_CHANNELS-1] = -1 };
-+/* IRQ <-> IPI mapping. */
-+#ifndef NR_IPIS
-+#define NR_IPIS 1
-+#endif
-+#if defined(CONFIG_SMP) && defined(CONFIG_X86)
-+static int ipi_to_irq[NR_IPIS] __read_mostly = {[0 ... NR_IPIS-1] = -1};
-+static DEFINE_PER_CPU(int[NR_IPIS], ipi_to_evtchn);
-+#else
-+#define PER_CPU_IPI_IRQ
-+#endif
-+#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ)
-+#define BUG_IF_IPI(irq) BUG_ON(type_from_irq(irq) == IRQT_IPI)
-+#else
-+#define BUG_IF_IPI(irq) ((void)(irq))
-+#endif
-+
- /* Packed IRQ information: binding type, sub-type index, and event channel. */
- static u32 irq_info[NR_IRQS];
-@@ -98,10 +114,12 @@ static inline u32 mk_irq_info(u32 type, 
-  * Accessors for packed IRQ information.
-  */
-+#ifdef PER_CPU_IPI_IRQ
- static inline unsigned int evtchn_from_irq(int irq)
- {
-       return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
- }
-+#endif
- static inline unsigned int index_from_irq(int irq)
- {
-@@ -113,14 +131,28 @@ static inline unsigned int type_from_irq
-       return irq_info[irq] >> (32 - _IRQT_BITS);
- }
-+#ifndef PER_CPU_IPI_IRQ
-+static inline unsigned int evtchn_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
-+{
-+      BUG_ON(type_from_irq(irq) != IRQT_IPI);
-+      return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)];
-+}
-+
-+static inline unsigned int evtchn_from_irq(unsigned int irq)
-+{
-+      if (type_from_irq(irq) != IRQT_IPI)
-+              return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
-+      return evtchn_from_per_cpu_irq(irq, smp_processor_id());
-+}
-+#endif
-+
- /* IRQ <-> VIRQ mapping. */
- DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
-+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
- /* IRQ <-> IPI mapping. */
--#ifndef NR_IPIS
--#define NR_IPIS 1
--#endif
- DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
-+#endif
- /* Reference counts for bindings to IRQs. */
- static int irq_bindcount[NR_IRQS];
-@@ -145,8 +177,14 @@ static void bind_evtchn_to_cpu(unsigned 
-       BUG_ON(!test_bit(chn, s->evtchn_mask));
--      if (irq != -1)
--              irq_desc[irq].affinity = cpumask_of_cpu(cpu);
-+      if (irq != -1) {
-+              struct irq_desc *desc = irq_desc + irq;
-+
-+              if (!(desc->status & IRQ_PER_CPU))
-+                      desc->affinity = cpumask_of_cpu(cpu);
-+              else
-+                      cpu_set(cpu, desc->affinity);
-+      }
-       clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
-       set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
-@@ -315,14 +353,29 @@ asmlinkage void evtchn_do_upcall(struct 
-       irq_exit();
- }
--static int find_unbound_irq(void)
-+static struct irq_chip dynirq_chip;
-+
-+static int find_unbound_irq(bool percpu)
- {
-       static int warned;
-       int irq;
-       for (irq = DYNIRQ_BASE; irq < (DYNIRQ_BASE + NR_DYNIRQS); irq++)
--              if (irq_bindcount[irq] == 0)
-+              if (irq_bindcount[irq] == 0) {
-+                      irq_flow_handler_t handle;
-+                      const char *name;
-+
-+                      if (!percpu) {
-+                              handle = handle_level_irq;
-+                              name = "level";
-+                      } else {
-+                              handle = handle_percpu_irq;
-+                              name = "percpu";
-+                      }
-+                      set_irq_chip_and_handler_name(irq, &dynirq_chip,
-+                                                    handle, name);
-                       return irq;
-+              }
-       if (!warned) {
-               warned = 1;
-@@ -340,7 +393,7 @@ static int bind_caller_port_to_irq(unsig
-       spin_lock(&irq_mapping_update_lock);
-       if ((irq = evtchn_to_irq[caller_port]) == -1) {
--              if ((irq = find_unbound_irq()) < 0)
-+              if ((irq = find_unbound_irq(false)) < 0)
-                       goto out;
-               evtchn_to_irq[caller_port] = irq;
-@@ -362,7 +415,7 @@ static int bind_local_port_to_irq(unsign
-       BUG_ON(evtchn_to_irq[local_port] != -1);
--      if ((irq = find_unbound_irq()) < 0) {
-+      if ((irq = find_unbound_irq(false)) < 0) {
-               struct evtchn_close close = { .port = local_port };
-               if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
-                       BUG();
-@@ -415,7 +468,7 @@ static int bind_virq_to_irq(unsigned int
-       spin_lock(&irq_mapping_update_lock);
-       if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
--              if ((irq = find_unbound_irq()) < 0)
-+              if ((irq = find_unbound_irq(false)) < 0)
-                       goto out;
-               bind_virq.virq = virq;
-@@ -440,6 +493,7 @@ static int bind_virq_to_irq(unsigned int
-       return irq;
- }
-+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
- static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
- {
-       struct evtchn_bind_ipi bind_ipi;
-@@ -448,7 +502,7 @@ static int bind_ipi_to_irq(unsigned int 
-       spin_lock(&irq_mapping_update_lock);
-       if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
--              if ((irq = find_unbound_irq()) < 0)
-+              if ((irq = find_unbound_irq(false)) < 0)
-                       goto out;
-               bind_ipi.vcpu = cpu;
-@@ -471,6 +525,7 @@ static int bind_ipi_to_irq(unsigned int 
-       spin_unlock(&irq_mapping_update_lock);
-       return irq;
- }
-+#endif
- static void unbind_from_irq(unsigned int irq)
- {
-@@ -478,6 +533,7 @@ static void unbind_from_irq(unsigned int
-       unsigned int cpu;
-       int evtchn = evtchn_from_irq(irq);
-+      BUG_IF_IPI(irq);
-       spin_lock(&irq_mapping_update_lock);
-       if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
-@@ -491,10 +547,12 @@ static void unbind_from_irq(unsigned int
-                       per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-                               [index_from_irq(irq)] = -1;
-                       break;
-+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
-               case IRQT_IPI:
-                       per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-                               [index_from_irq(irq)] = -1;
-                       break;
-+#endif
-               default:
-                       break;
-               }
-@@ -513,6 +571,46 @@ static void unbind_from_irq(unsigned int
-       spin_unlock(&irq_mapping_update_lock);
- }
-+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
-+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
-+{
-+      struct evtchn_close close;
-+      int evtchn = evtchn_from_per_cpu_irq(irq, cpu);
-+
-+      spin_lock(&irq_mapping_update_lock);
-+
-+      if (VALID_EVTCHN(evtchn)) {
-+              struct irq_desc *desc = irq_desc + irq;
-+
-+              mask_evtchn(evtchn);
-+
-+              BUG_ON(irq_bindcount[irq] <= 1);
-+              irq_bindcount[irq]--;
-+              cpu_clear(cpu, desc->affinity);
-+
-+              close.port = evtchn;
-+              if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
-+                      BUG();
-+
-+              switch (type_from_irq(irq)) {
-+              case IRQT_IPI:
-+                      per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)] = 0;
-+                      break;
-+              default:
-+                      BUG();
-+                      break;
-+              }
-+
-+              /* Closed ports are implicitly re-bound to VCPU0. */
-+              bind_evtchn_to_cpu(evtchn, 0);
-+
-+              evtchn_to_irq[evtchn] = -1;
-+      }
-+
-+      spin_unlock(&irq_mapping_update_lock);
-+}
-+#endif /* CONFIG_SMP && !PER_CPU_IPI_IRQ */
-+
- int bind_caller_port_to_irqhandler(
-       unsigned int caller_port,
-       irq_handler_t handler,
-@@ -607,6 +705,8 @@ int bind_virq_to_irqhandler(
- }
- EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
-+#ifdef CONFIG_SMP
-+#ifdef PER_CPU_IPI_IRQ
- int bind_ipi_to_irqhandler(
-       unsigned int ipi,
-       unsigned int cpu,
-@@ -629,7 +729,71 @@ int bind_ipi_to_irqhandler(
-       return irq;
- }
--EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
-+#else
-+int __cpuinit bind_ipi_to_irqaction(
-+      unsigned int ipi,
-+      unsigned int cpu,
-+      struct irqaction *action)
-+{
-+      struct evtchn_bind_ipi bind_ipi;
-+      int evtchn, irq, retval = 0;
-+
-+      spin_lock(&irq_mapping_update_lock);
-+
-+      if (VALID_EVTCHN(per_cpu(ipi_to_evtchn, cpu)[ipi])) {
-+              spin_unlock(&irq_mapping_update_lock);
-+              return -EBUSY;
-+      }
-+
-+      if ((irq = ipi_to_irq[ipi]) == -1) {
-+              if ((irq = find_unbound_irq(true)) < 0) {
-+                      spin_unlock(&irq_mapping_update_lock);
-+                      return irq;
-+              }
-+
-+              /* Extra reference so count will never drop to zero. */
-+              irq_bindcount[irq]++;
-+
-+              ipi_to_irq[ipi] = irq;
-+              irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, 0);
-+              retval = 1;
-+      }
-+
-+      bind_ipi.vcpu = cpu;
-+      if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
-+                                      &bind_ipi) != 0)
-+              BUG();
-+
-+      evtchn = bind_ipi.port;
-+      evtchn_to_irq[evtchn] = irq;
-+      per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
-+
-+      bind_evtchn_to_cpu(evtchn, cpu);
-+
-+      irq_bindcount[irq]++;
-+
-+      spin_unlock(&irq_mapping_update_lock);
-+
-+      if (retval == 0) {
-+              unsigned long flags;
-+
-+              local_irq_save(flags);
-+              unmask_evtchn(evtchn);
-+              local_irq_restore(flags);
-+      } else {
-+              action->flags |= IRQF_PERCPU;
-+              retval = setup_irq(irq, action);
-+              if (retval) {
-+                      unbind_from_per_cpu_irq(irq, cpu);
-+                      BUG_ON(retval > 0);
-+                      irq = retval;
-+              }
-+      }
-+
-+      return irq;
-+}
-+#endif /* PER_CPU_IPI_IRQ */
-+#endif /* CONFIG_SMP */
- void unbind_from_irqhandler(unsigned int irq, void *dev_id)
- {
-@@ -655,6 +819,7 @@ static void rebind_irq_to_cpu(unsigned i
- {
-       int evtchn = evtchn_from_irq(irq);
-+      BUG_IF_IPI(irq);
-       if (VALID_EVTCHN(evtchn))
-               rebind_evtchn_to_cpu(evtchn, tcpu);
- }
-@@ -739,6 +904,7 @@ static struct irq_chip dynirq_chip = {
-       .unmask   = unmask_dynirq,
-       .mask_ack = ack_dynirq,
-       .ack      = ack_dynirq,
-+      .eoi      = end_dynirq,
-       .end      = end_dynirq,
- #ifdef CONFIG_SMP
-       .set_affinity = set_affinity_irq,
-@@ -918,10 +1084,21 @@ int irq_ignore_unhandled(unsigned int ir
-       return !!(irq_status.flags & XENIRQSTAT_shared);
- }
-+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
-+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
-+{
-+      int evtchn = evtchn_from_per_cpu_irq(ipi_to_irq[ipi], cpu);
-+
-+      if (VALID_EVTCHN(evtchn))
-+              notify_remote_via_evtchn(evtchn);
-+}
-+#endif
-+
- void notify_remote_via_irq(int irq)
- {
-       int evtchn = evtchn_from_irq(irq);
-+      BUG_IF_IPI(irq);
-       if (VALID_EVTCHN(evtchn))
-               notify_remote_via_evtchn(evtchn);
- }
-@@ -929,6 +1106,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq)
- int irq_to_evtchn_port(int irq)
- {
-+      BUG_IF_IPI(irq);
-       return evtchn_from_irq(irq);
- }
- EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
-@@ -1044,11 +1222,17 @@ static void restore_cpu_virqs(unsigned i
- static void restore_cpu_ipis(unsigned int cpu)
- {
-+#ifdef CONFIG_SMP
-       struct evtchn_bind_ipi bind_ipi;
-       int ipi, irq, evtchn;
-       for (ipi = 0; ipi < NR_IPIS; ipi++) {
-+#ifdef PER_CPU_IPI_IRQ
-               if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
-+#else
-+              if ((irq = ipi_to_irq[ipi]) == -1
-+                  || !VALID_EVTCHN(per_cpu(ipi_to_evtchn, cpu)[ipi]))
-+#endif
-                       continue;
-               BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
-@@ -1062,13 +1246,18 @@ static void restore_cpu_ipis(unsigned in
-               /* Record the new mapping. */
-               evtchn_to_irq[evtchn] = irq;
-+#ifdef PER_CPU_IPI_IRQ
-               irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-+#else
-+              per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
-+#endif
-               bind_evtchn_to_cpu(evtchn, cpu);
-               /* Ready for use. */
-               if (!(irq_desc[irq].status & IRQ_DISABLED))
-                       unmask_evtchn(evtchn);
-       }
-+#endif
- }
- static int evtchn_resume(struct sys_device *dev)
-@@ -1239,8 +1428,6 @@ void __init xen_init_IRQ(void)
-               irq_bindcount[i] = 0;
-               irq_desc[i].status |= IRQ_NOPROBE;
--              set_irq_chip_and_handler_name(i, &dynirq_chip,
--                                            handle_level_irq, "level");
-       }
-       /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
---- sle11-2009-06-04.orig/drivers/xen/core/smpboot.c   2009-06-04 10:47:07.000000000 +0200
-+++ sle11-2009-06-04/drivers/xen/core/smpboot.c        2009-06-04 10:47:21.000000000 +0200
-@@ -49,12 +49,9 @@ cpumask_t cpu_initialized_map;
- DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info);
- EXPORT_PER_CPU_SYMBOL(cpu_info);
--static DEFINE_PER_CPU(int, resched_irq);
--static DEFINE_PER_CPU(int, callfunc_irq);
--static DEFINE_PER_CPU(int, call1func_irq);
--static char resched_name[NR_CPUS][15];
--static char callfunc_name[NR_CPUS][15];
--static char call1func_name[NR_CPUS][15];
-+static int __read_mostly resched_irq = -1;
-+static int __read_mostly callfunc_irq = -1;
-+static int __read_mostly call1func_irq = -1;
- #ifdef CONFIG_X86_LOCAL_APIC
- #define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid))
-@@ -109,47 +106,54 @@ remove_siblinginfo(unsigned int cpu)
- static int __cpuinit xen_smp_intr_init(unsigned int cpu)
- {
-+      static struct irqaction resched_action = {
-+              .handler = smp_reschedule_interrupt,
-+              .flags   = IRQF_DISABLED,
-+              .name    = "resched"
-+      }, callfunc_action = {
-+              .handler = smp_call_function_interrupt,
-+              .flags   = IRQF_DISABLED,
-+              .name    = "callfunc"
-+      }, call1func_action = {
-+              .handler = smp_call_function_single_interrupt,
-+              .flags   = IRQF_DISABLED,
-+              .name    = "call1func"
-+      };
-       int rc;
--      per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) =
--              per_cpu(call1func_irq, cpu) = -1;
--
--      sprintf(resched_name[cpu], "resched%u", cpu);
--      rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
--                                  cpu,
--                                  smp_reschedule_interrupt,
--                                  IRQF_DISABLED|IRQF_NOBALANCING,
--                                  resched_name[cpu],
--                                  NULL);
-+      rc = bind_ipi_to_irqaction(RESCHEDULE_VECTOR,
-+                                 cpu,
-+                                 &resched_action);
-       if (rc < 0)
--              goto fail;
--      per_cpu(resched_irq, cpu) = rc;
--
--      sprintf(callfunc_name[cpu], "callfunc%u", cpu);
--      rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
--                                  cpu,
--                                  smp_call_function_interrupt,
--                                  IRQF_DISABLED|IRQF_NOBALANCING,
--                                  callfunc_name[cpu],
--                                  NULL);
-+              return rc;
-+      if (resched_irq < 0)
-+              resched_irq = rc;
-+      else
-+              BUG_ON(resched_irq != rc);
-+
-+      rc = bind_ipi_to_irqaction(CALL_FUNCTION_VECTOR,
-+                                 cpu,
-+                                 &callfunc_action);
-       if (rc < 0)
--              goto fail;
--      per_cpu(callfunc_irq, cpu) = rc;
--
--      sprintf(call1func_name[cpu], "call1func%u", cpu);
--      rc = bind_ipi_to_irqhandler(CALL_FUNC_SINGLE_VECTOR,
--                                  cpu,
--                                  smp_call_function_single_interrupt,
--                                  IRQF_DISABLED|IRQF_NOBALANCING,
--                                  call1func_name[cpu],
--                                  NULL);
-+              goto unbind_resched;
-+      if (callfunc_irq < 0)
-+              callfunc_irq = rc;
-+      else
-+              BUG_ON(callfunc_irq != rc);
-+
-+      rc = bind_ipi_to_irqaction(CALL_FUNC_SINGLE_VECTOR,
-+                                 cpu,
-+                                 &call1func_action);
-       if (rc < 0)
--              goto fail;
--      per_cpu(call1func_irq, cpu) = rc;
-+              goto unbind_call;
-+      if (call1func_irq < 0)
-+              call1func_irq = rc;
-+      else
-+              BUG_ON(call1func_irq != rc);
-       rc = xen_spinlock_init(cpu);
-       if (rc < 0)
--              goto fail;
-+              goto unbind_call1;
-       if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
-               goto fail;
-@@ -157,13 +161,13 @@ static int __cpuinit xen_smp_intr_init(u
-       return 0;
-  fail:
--      if (per_cpu(resched_irq, cpu) >= 0)
--              unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
--      if (per_cpu(callfunc_irq, cpu) >= 0)
--              unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
--      if (per_cpu(call1func_irq, cpu) >= 0)
--              unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
-       xen_spinlock_cleanup(cpu);
-+ unbind_call1:
-+      unbind_from_per_cpu_irq(call1func_irq, cpu);
-+ unbind_call:
-+      unbind_from_per_cpu_irq(callfunc_irq, cpu);
-+ unbind_resched:
-+      unbind_from_per_cpu_irq(resched_irq, cpu);
-       return rc;
- }
-@@ -173,9 +177,9 @@ static void __cpuinit xen_smp_intr_exit(
-       if (cpu != 0)
-               local_teardown_timer(cpu);
--      unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
--      unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
--      unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
-+      unbind_from_per_cpu_irq(resched_irq, cpu);
-+      unbind_from_per_cpu_irq(callfunc_irq, cpu);
-+      unbind_from_per_cpu_irq(call1func_irq, cpu);
-       xen_spinlock_cleanup(cpu);
- }
- #endif
---- sle11-2009-06-04.orig/drivers/xen/core/spinlock.c  2009-06-04 10:36:24.000000000 +0200
-+++ sle11-2009-06-04/drivers/xen/core/spinlock.c       2009-06-04 10:47:21.000000000 +0200
-@@ -16,8 +16,7 @@
- extern irqreturn_t smp_reschedule_interrupt(int, void *);
--static DEFINE_PER_CPU(int, spinlock_irq) = -1;
--static char spinlock_name[NR_CPUS][15];
-+static int __read_mostly spinlock_irq = -1;
- struct spinning {
-       raw_spinlock_t *lock;
-@@ -34,34 +33,36 @@ static DEFINE_PER_CPU(raw_rwlock_t, spin
- int __cpuinit xen_spinlock_init(unsigned int cpu)
- {
-+      static struct irqaction spinlock_action = {
-+              .handler = smp_reschedule_interrupt,
-+              .flags   = IRQF_DISABLED,
-+              .name    = "spinlock"
-+      };
-       int rc;
--      sprintf(spinlock_name[cpu], "spinlock%u", cpu);
--      rc = bind_ipi_to_irqhandler(SPIN_UNLOCK_VECTOR,
--                                  cpu,
--                                  smp_reschedule_interrupt,
--                                  IRQF_DISABLED|IRQF_NOBALANCING,
--                                  spinlock_name[cpu],
--                                  NULL);
-+      rc = bind_ipi_to_irqaction(SPIN_UNLOCK_VECTOR,
-+                                 cpu,
-+                                 &spinlock_action);
-       if (rc < 0)
-               return rc;
--      disable_irq(rc); /* make sure it's never delivered */
--      per_cpu(spinlock_irq, cpu) = rc;
-+      if (spinlock_irq < 0) {
-+              disable_irq(rc); /* make sure it's never delivered */
-+              spinlock_irq = rc;
-+      } else
-+              BUG_ON(spinlock_irq != rc);
-       return 0;
- }
- void __cpuinit xen_spinlock_cleanup(unsigned int cpu)
- {
--      if (per_cpu(spinlock_irq, cpu) >= 0)
--              unbind_from_irqhandler(per_cpu(spinlock_irq, cpu), NULL);
--      per_cpu(spinlock_irq, cpu) = -1;
-+      unbind_from_per_cpu_irq(spinlock_irq, cpu);
- }
- int xen_spin_wait(raw_spinlock_t *lock, unsigned int token)
- {
--      int rc = 0, irq = __get_cpu_var(spinlock_irq);
-+      int rc = 0, irq = spinlock_irq;
-       raw_rwlock_t *rm_lock;
-       unsigned long flags;
-       struct spinning spinning;
-@@ -155,7 +156,7 @@ void xen_spin_kick(raw_spinlock_t *lock,
-               raw_local_irq_restore(flags);
-               if (unlikely(spinning)) {
--                      notify_remote_via_irq(per_cpu(spinlock_irq, cpu));
-+                      notify_remote_via_ipi(SPIN_UNLOCK_VECTOR, cpu);
-                       return;
-               }
-       }
---- sle11-2009-06-04.orig/include/xen/evtchn.h 2009-06-04 10:47:20.000000000 +0200
-+++ sle11-2009-06-04/include/xen/evtchn.h      2009-06-04 10:47:21.000000000 +0200
-@@ -78,6 +78,8 @@ int bind_virq_to_irqhandler(
-       unsigned long irqflags,
-       const char *devname,
-       void *dev_id);
-+#if defined(CONFIG_SMP) && !defined(MODULE)
-+#ifndef CONFIG_X86
- int bind_ipi_to_irqhandler(
-       unsigned int ipi,
-       unsigned int cpu,
-@@ -85,6 +87,13 @@ int bind_ipi_to_irqhandler(
-       unsigned long irqflags,
-       const char *devname,
-       void *dev_id);
-+#else
-+int bind_ipi_to_irqaction(
-+      unsigned int ipi,
-+      unsigned int cpu,
-+      struct irqaction *action);
-+#endif
-+#endif
- /*
-  * Common unbind function for all event sources. Takes IRQ to unbind from.
-@@ -93,6 +102,11 @@ int bind_ipi_to_irqhandler(
-  */
- void unbind_from_irqhandler(unsigned int irq, void *dev_id);
-+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
-+/* Specialized unbind function for per-CPU IRQs. */
-+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu);
-+#endif
-+
- #ifndef CONFIG_XEN
- void irq_resume(void);
- #endif
-@@ -184,4 +198,8 @@ int clear_pirq_hw_action(int pirq);
- #define PIRQ_END 5
- #define PIRQ_ACK 6
-+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
-+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu);
-+#endif
-+
- #endif /* __ASM_EVTCHN_H__ */
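
For readability, here is a condensed C sketch of the interface the removed patch introduced: one shared IRQ_PER_CPU IRQ per IPI vector, bound once with a static irqaction, instead of a separate IRQ per IPI per CPU. It is modelled on the drivers/xen/core/smpboot.c hunk above and is illustrative only; it assumes the bind_ipi_to_irqaction(), notify_remote_via_ipi() and unbind_from_per_cpu_irq() prototypes from the include/xen/evtchn.h hunk, would build only inside that SLE11 Xen kernel tree, and the resched_ipi_* wrapper names are invented for the example.

    #include <linux/init.h>
    #include <linux/interrupt.h>
    #include <xen/evtchn.h>

    /* Handler provided elsewhere in the tree (see the spinlock.c hunk). */
    extern irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);

    /* One shared action for all CPUs; no per-CPU "resched%u" names needed. */
    static struct irqaction resched_action = {
            .handler = smp_reschedule_interrupt,
            .flags   = IRQF_DISABLED,
            .name    = "resched",
    };

    /* Single IRQ number shared by every CPU for this IPI vector. */
    static int __read_mostly resched_irq = -1;

    static int __cpuinit resched_ipi_init(unsigned int cpu)
    {
            /* The first caller allocates the shared IRQ and installs the
             * action; later CPUs only open their own event channel bound
             * to the same IRQ. Returns the IRQ number or a negative errno. */
            int rc = bind_ipi_to_irqaction(RESCHEDULE_VECTOR, cpu,
                                           &resched_action);

            if (rc < 0)
                    return rc;
            if (resched_irq < 0)
                    resched_irq = rc;
            return 0;
    }

    static void resched_ipi_send(unsigned int cpu)
    {
            /* Replaces notify_remote_via_irq(per_cpu(resched_irq, cpu)):
             * the per-CPU event channel is looked up from the IPI vector. */
            notify_remote_via_ipi(RESCHEDULE_VECTOR, cpu);
    }

    static void resched_ipi_exit(unsigned int cpu)
    {
            /* Closes only this CPU's event channel; the shared IRQ and its
             * irqaction stay in place for the remaining CPUs. */
            unbind_from_per_cpu_irq(resched_irq, cpu);
    }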