Reenabled linux-xen, added patches for Xen Kernel Version 2.6.27.31,
diff --git a/src/patches/suse-2.6.27.31/patches.xen/746-pirq-status-page.patch b/src/patches/suse-2.6.27.31/patches.xen/746-pirq-status-page.patch
new file mode 100644 (file)
index 0000000..7c193d4
--- /dev/null
@@ -0,0 +1,177 @@
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg?rev/d545a95fca73
+# HG changeset 746+749+751+760 patch
+# User Keir Fraser <keir.fraser@citrix.com>
+# Date 1227879027 0
+# Node ID d545a95fca739d0b1963b73a9eb64ea64a244e76
+# Parent  2268be46c75ec6eddb7cd387af8a236a565f6140
+Subject: linux/x86: use shared page indicating the need for an EOI notification
+Patch-mainline: obsolete
+
+Signed-off-by: Jan Beulich <jbeulich@novell.com>
+
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg?rev/cdc6729dc702
+Subject: evtchn: Fix the build.
+Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
+
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg?rev/ca213a56dba1
+Subject: evtchn, phydev: rename PHYSDEVOP_pirq_eoi_mfn to PHYSDEVOP_pirq_eoi_gmfn
+
+Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
+Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
+
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg?rev/0d10be086a78
+Subject: linux/evtchn: allocate pirq_needs_eoi bitmap dynamically
+
+Original patch from: Isaku Yamahata <yamahata@valinux.co.jp>
+Signed-off-by: Jan Beulich <jbeulich@novell.com>
+
+--- head-2008-12-15.orig/drivers/xen/core/evtchn.c     2008-11-10 11:44:21.000000000 +0100
++++ head-2008-12-15/drivers/xen/core/evtchn.c  2008-12-15 11:06:31.000000000 +0100
+@@ -35,6 +35,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/sched.h>
+ #include <linux/kernel_stat.h>
++#include <linux/bootmem.h>
+ #include <linux/version.h>
+ #include <asm/atomic.h>
+ #include <asm/system.h>
+@@ -123,9 +124,6 @@ DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS])
+ /* Reference counts for bindings to IRQs. */
+ static int irq_bindcount[NR_IRQS];
+-/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
+-static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
+-
+ #ifdef CONFIG_SMP
+ static u8 cpu_evtchn[NR_EVENT_CHANNELS];
+@@ -756,16 +754,47 @@ static struct hw_interrupt_type dynirq_t
+       .retrigger = resend_irq_on_evtchn,
+ };
+-static inline void pirq_unmask_notify(int irq)
++/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
++static int pirq_eoi_does_unmask;
++static unsigned long *pirq_needs_eoi;
++
++static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
+ {
+       struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
+-      if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
+-              VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
++
++      if (pirq_eoi_does_unmask) {
++              if (test_bit(eoi.irq, pirq_needs_eoi))
++                      VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
++              else
++                      unmask_evtchn(evtchn);
++      } else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
++              if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
++                      struct evtchn_unmask unmask = { .port = evtchn };
++                      struct multicall_entry mcl[2];
++
++                      mcl[0].op = __HYPERVISOR_event_channel_op;
++                      mcl[0].args[0] = EVTCHNOP_unmask;
++                      mcl[0].args[1] = (unsigned long)&unmask;
++                      mcl[1].op = __HYPERVISOR_physdev_op;
++                      mcl[1].args[0] = PHYSDEVOP_eoi;
++                      mcl[1].args[1] = (unsigned long)&eoi;
++
++                      if (HYPERVISOR_multicall(mcl, 2))
++                              BUG();
++              } else {
++                      unmask_evtchn(evtchn);
++                      VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
++              }
++      } else
++              unmask_evtchn(evtchn);
+ }
+ static inline void pirq_query_unmask(int irq)
+ {
+       struct physdev_irq_status_query irq_status;
++
++      if (pirq_eoi_does_unmask)
++              return;
+       irq_status.irq = evtchn_get_xen_pirq(irq);
+       if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
+               irq_status.flags = 0;
+@@ -806,8 +835,7 @@ static unsigned int startup_pirq(unsigne
+       irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
+  out:
+-      unmask_evtchn(evtchn);
+-      pirq_unmask_notify(irq);
++      pirq_unmask_and_notify(evtchn, irq);
+       return 0;
+ }
+@@ -859,10 +887,8 @@ static void end_pirq(unsigned int irq)
+       if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
+           (IRQ_DISABLED|IRQ_PENDING)) {
+               shutdown_pirq(irq);
+-      } else if (VALID_EVTCHN(evtchn)) {
+-              unmask_evtchn(evtchn);
+-              pirq_unmask_notify(irq);
+-      }
++      } else if (VALID_EVTCHN(evtchn))
++              pirq_unmask_and_notify(evtchn, irq);
+ }
+ static struct hw_interrupt_type pirq_type = {
+@@ -1012,6 +1038,14 @@ void irq_resume(void)
+       init_evtchn_cpu_bindings();
++      if (pirq_eoi_does_unmask) {
++              struct physdev_pirq_eoi_gmfn eoi_gmfn;
++
++              eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT;
++              if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn))
++                      BUG();
++      }
++
+       /* New event-channel space is not 'live' yet. */
+       for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+               mask_evtchn(evtchn);
+@@ -1098,9 +1132,16 @@ int evtchn_get_xen_pirq(int irq)
+ void __init xen_init_IRQ(void)
+ {
+       unsigned int i;
++      struct physdev_pirq_eoi_gmfn eoi_gmfn;
+       init_evtchn_cpu_bindings();
++      pirq_needs_eoi = alloc_bootmem_pages(sizeof(unsigned long)
++              * BITS_TO_LONGS(ALIGN(NR_PIRQS, PAGE_SIZE * 8)));
++      eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT;
++      if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn) == 0)
++              pirq_eoi_does_unmask = 1;
++
+       /* No event channels are 'live' right now. */
+       for (i = 0; i < NR_EVENT_CHANNELS; i++)
+               mask_evtchn(i);
+--- head-2008-12-15.orig/include/xen/interface/physdev.h       2008-11-25 12:35:56.000000000 +0100
++++ head-2008-12-15/include/xen/interface/physdev.h    2008-12-08 13:22:39.000000000 +0100
+@@ -41,6 +41,21 @@ typedef struct physdev_eoi physdev_eoi_t
+ DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
+ /*
++ * Register a shared page for the hypervisor to indicate whether the guest
++ * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
++ * once the guest has used this function: the associated event channel is
++ * then automatically unmasked. The registered page is used as a bit
++ * array indexed by Xen's PIRQ value.
++ */
++#define PHYSDEVOP_pirq_eoi_gmfn         17
++struct physdev_pirq_eoi_gmfn {
++    /* IN */
++    xen_pfn_t gmfn;
++};
++typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t;
++DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t);
++
++/*
+  * Query the status of an IRQ line.
+  * @arg == pointer to physdev_irq_status_query structure.
+  */
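
In short, the combined changesets above replace the per-IRQ PHYSDEVOP_irq_status_query handshake with a page-sized bitmap that the guest registers once and the hypervisor keeps up to date: a set bit means the corresponding PIRQ still needs a PHYSDEVOP_eoi, and once the page is registered the EOI hypercall also unmasks the event channel. The sketch below condenses that flow out of the hunks; the helper names register_pirq_eoi_bitmap() and eoi_pirq() are illustrative rather than taken from the patch, the code assumes the kernel context of drivers/xen/core/evtchn.c (HYPERVISOR_physdev_op(), VOID(), unmask_evtchn(), NR_PIRQS, PIRQ_BASE), and it omits the cross-CPU multicall optimisation that pirq_unmask_and_notify() keeps for the legacy path.

/*
 * Illustrative condensation of the mechanism added by this patch; the
 * function names are hypothetical and the surrounding kernel context of
 * drivers/xen/core/evtchn.c (headers, NR_PIRQS, PIRQ_BASE, VOID(), etc.)
 * is assumed.
 */

static int pirq_eoi_does_unmask;        /* set once the page is registered */
static unsigned long *pirq_needs_eoi;   /* shared bit array */

/* Boot-time registration, as done in xen_init_IRQ(): hand the hypervisor a
 * page-aligned bitmap covering NR_PIRQS bits. */
static void __init register_pirq_eoi_bitmap(void)
{
	struct physdev_pirq_eoi_gmfn eoi_gmfn;

	pirq_needs_eoi = alloc_bootmem_pages(sizeof(unsigned long)
		* BITS_TO_LONGS(ALIGN(NR_PIRQS, PAGE_SIZE * 8)));
	eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT;

	/* If the hypervisor accepts the page, PHYSDEVOP_eoi will from now on
	 * also unmask the event channel (and pirq_query_unmask() returns
	 * early). */
	if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn) == 0)
		pirq_eoi_does_unmask = 1;
}

/* Interrupt-completion path, as in pirq_unmask_and_notify(), minus the
 * cross-CPU multicall variant of the legacy branch. */
static void eoi_pirq(unsigned int evtchn, unsigned int irq)
{
	struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };

	if (pirq_eoi_does_unmask) {
		/* Shared bitmap is indexed by Xen's PIRQ value. */
		if (test_bit(eoi.irq, pirq_needs_eoi))
			VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
		else
			unmask_evtchn(evtchn);
	} else {
		/* Legacy path: bitmap indexed by Linux IRQ, filled in by
		 * PHYSDEVOP_irq_status_query when the PIRQ is started up. */
		unmask_evtchn(evtchn);
		if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi))
			VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
	}
}

The gain is that the common case needs no extra hypercall at all: the per-IRQ status query and the separate unmask both disappear, leaving a single PHYSDEVOP_eoi only when the shared bit for that PIRQ is actually set.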