2.6.32 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Fri, 3 Sep 2010 22:37:54 +0000 (15:37 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Fri, 3 Sep 2010 22:37:54 +0000 (15:37 -0700)
queue-2.6.32/alsa-hda-rename-imic-to-int-mic-on-lenovo-nb0763.patch [new file with mode: 0644]
queue-2.6.32/hwmon-k8temp-differentiate-between-am2-and-asb1.patch [new file with mode: 0644]
queue-2.6.32/pci-msi-remove-unsafe-and-unnecessary-hardware-access.patch [new file with mode: 0644]
queue-2.6.32/pci-msi-restore-read_msi_msg_desc-add-get_cached_msi_msg_desc.patch [new file with mode: 0644]
queue-2.6.32/sata_mv-fix-broken-dsm-trim-support-v2.patch [new file with mode: 0644]
queue-2.6.32/x86-tsc-sched-recompute-cyc2ns_offset-s-during-resume-from-sleep-states.patch [new file with mode: 0644]
queue-2.6.32/xen-handle-events-as-edge-triggered.patch [new file with mode: 0644]
queue-2.6.32/xen-use-percpu-interrupts-for-ipis-and-virqs.patch [new file with mode: 0644]

diff --git a/queue-2.6.32/alsa-hda-rename-imic-to-int-mic-on-lenovo-nb0763.patch b/queue-2.6.32/alsa-hda-rename-imic-to-int-mic-on-lenovo-nb0763.patch
new file mode 100644 (file)
index 0000000..8536959
--- /dev/null
@@ -0,0 +1,42 @@
+From 150b432f448281d5518f5229d240923f9a9c5459 Mon Sep 17 00:00:00 2001
+From: David Henningsson <david.henningsson@canonical.com>
+Date: Thu, 29 Jul 2010 14:46:42 +0200
+Subject: ALSA: hda - Rename iMic to Int Mic on Lenovo NB0763
+
+From: David Henningsson <david.henningsson@canonical.com>
+
+commit 150b432f448281d5518f5229d240923f9a9c5459 upstream.
+
+The non-standard name "iMic" makes PulseAudio ignore the microphone.
+BugLink: https://launchpad.net/bugs/605101
+
+Signed-off-by: David Henningsson <david.henningsson@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/pci/hda/patch_realtek.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6589,7 +6589,7 @@ static struct hda_input_mux alc883_lenov
+       .num_items = 4,
+       .items = {
+               { "Mic", 0x0 },
+-              { "iMic", 0x1 },
++              { "Int Mic", 0x1 },
+               { "Line", 0x2 },
+               { "CD", 0x4 },
+       },
+@@ -8038,8 +8038,8 @@ static struct snd_kcontrol_new alc883_le
+       HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
+       HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+       HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+-      HDA_CODEC_VOLUME("iMic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+-      HDA_CODEC_MUTE("iMic Playback Switch", 0x0b, 0x1, HDA_INPUT),
++      HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
++      HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+       { } /* end */
+ };
diff --git a/queue-2.6.32/hwmon-k8temp-differentiate-between-am2-and-asb1.patch b/queue-2.6.32/hwmon-k8temp-differentiate-between-am2-and-asb1.patch
new file mode 100644 (file)
index 0000000..d7b1515
--- /dev/null
@@ -0,0 +1,83 @@
+From a05e93f3b3fc2f53c1d0de3b17019e207c482349 Mon Sep 17 00:00:00 2001
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+Date: Wed, 25 Aug 2010 15:42:12 +0200
+Subject: hwmon: (k8temp) Differentiate between AM2 and ASB1
+
+From: Andreas Herrmann <andreas.herrmann3@amd.com>
+
+commit a05e93f3b3fc2f53c1d0de3b17019e207c482349 upstream.
+
+Commit 8bf0223ed515be24de0c671eedaff49e78bebc9c (hwmon, k8temp: Fix
+temperature reporting for ASB1 processor revisions) fixed temperature
+reporting for ASB1 CPUs. But those CPU models (model 0x6b, 0x6f, 0x7f)
+were packaged both as AM2 (desktop) and ASB1 (mobile). Thus the commit
+leads to wrong temperature reporting for AM2 CPU parts.
+
+The solution is to determine the package type for models 0x6b, 0x6f,
+0x7f.
+
+This is done using BrandId from CPUID Fn8000_0001_EBX[15:0]. See
+"Constructing the processor Name String" in "Revision Guide for AMD
+NPT Family 0Fh Processors" (Rev. 3.46).
+
+Cc: Rudolf Marek <r.marek@assembler.cz>
+Reported-by: Vladislav Guberinic <neosisani@gmail.com>
+Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hwmon/k8temp.c |   35 ++++++++++++++++++++++++++++++++---
+ 1 file changed, 32 insertions(+), 3 deletions(-)
+
+--- a/drivers/hwmon/k8temp.c
++++ b/drivers/hwmon/k8temp.c
+@@ -143,6 +143,37 @@ static struct pci_device_id k8temp_ids[]
+ MODULE_DEVICE_TABLE(pci, k8temp_ids);
++static int __devinit is_rev_g_desktop(u8 model)
++{
++      u32 brandidx;
++
++      if (model < 0x69)
++              return 0;
++
++      if (model == 0xc1 || model == 0x6c || model == 0x7c)
++              return 0;
++
++      /*
++       * Differentiate between AM2 and ASB1.
++       * See "Constructing the processor Name String" in "Revision
++       * Guide for AMD NPT Family 0Fh Processors" (33610).
++       */
++      brandidx = cpuid_ebx(0x80000001);
++      brandidx = (brandidx >> 9) & 0x1f;
++
++      /* Single core */
++      if ((model == 0x6f || model == 0x7f) &&
++          (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
++              return 0;
++
++      /* Dual core */
++      if (model == 0x6b &&
++          (brandidx == 0xb || brandidx == 0xc))
++              return 0;
++
++      return 1;
++}
++
+ static int __devinit k8temp_probe(struct pci_dev *pdev,
+                                 const struct pci_device_id *id)
+ {
+@@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct
+                                "wrong - check erratum #141\n");
+               }
+-              if ((model >= 0x69) &&
+-                  !(model == 0xc1 || model == 0x6c || model == 0x7c ||
+-                    model == 0x6b || model == 0x6f || model == 0x7f)) {
++              if (is_rev_g_desktop(model)) {
+                       /*
+                        * RevG desktop CPUs (i.e. no socket S1G1 or
+                        * ASB1 parts) need additional offset,
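The commit message above points at CPUID Fn8000_0001_EBX[15:0] (BrandId) as the discriminator between AM2 and ASB1 parts. As an aside, a minimal userspace sketch (not part of the patch; it assumes gcc/clang's <cpuid.h>) that reads the same field and derives the same 5-bit brand-table index the driver tests:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
                return 1;       /* extended CPUID leaf not available */

        unsigned int brandid  = ebx & 0xffff;          /* BrandId = EBX[15:0] */
        unsigned int brandidx = (brandid >> 9) & 0x1f; /* bits 13:9, as k8temp uses */

        printf("BrandId=0x%04x brand table index=0x%x\n", brandid, brandidx);
        return 0;
}

On a revision-G model 0x6b/0x6f/0x7f CPU, the printed index is what decides whether the driver applies the desktop (AM2) temperature offset.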
diff --git a/queue-2.6.32/pci-msi-remove-unsafe-and-unnecessary-hardware-access.patch b/queue-2.6.32/pci-msi-remove-unsafe-and-unnecessary-hardware-access.patch
new file mode 100644 (file)
index 0000000..6ed8215
--- /dev/null
@@ -0,0 +1,86 @@
+From fcd097f31a6ee207cc0c3da9cccd2a86d4334785 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Thu, 17 Jun 2010 20:16:36 +0100
+Subject: PCI: MSI: Remove unsafe and unnecessary hardware access
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+commit fcd097f31a6ee207cc0c3da9cccd2a86d4334785 upstream.
+
+During suspend on an SMP system, {read,write}_msi_msg_desc() may be
+called to mask and unmask interrupts on a device that is already in a
+reduced power state.  At this point memory-mapped registers including
+MSI-X tables are not accessible, and config space may not be fully
+functional either.
+
+While a device is in a reduced power state its interrupts are
+effectively masked and its MSI(-X) state will be restored when it is
+brought back to D0.  Therefore these functions can simply read and
+write msi_desc::msg for devices not in D0.
+
+Further, read_msi_msg_desc() should only ever be used to update a
+previously written message, so it can always read msi_desc::msg
+and never needs to touch the hardware.
+
+Tested-by: "Michael Chan" <mchan@broadcom.com>
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pci/msi.c |   36 ++++++++++++------------------------
+ 1 file changed, 12 insertions(+), 24 deletions(-)
+
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -195,30 +195,15 @@ void unmask_msi_irq(unsigned int irq)
+ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+ {
+       struct msi_desc *entry = get_irq_desc_msi(desc);
+-      if (entry->msi_attrib.is_msix) {
+-              void __iomem *base = entry->mask_base +
+-                      entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+-              msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+-              msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+-              msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
+-      } else {
+-              struct pci_dev *dev = entry->dev;
+-              int pos = entry->msi_attrib.pos;
+-              u16 data;
+-
+-              pci_read_config_dword(dev, msi_lower_address_reg(pos),
+-                                      &msg->address_lo);
+-              if (entry->msi_attrib.is_64) {
+-                      pci_read_config_dword(dev, msi_upper_address_reg(pos),
+-                                              &msg->address_hi);
+-                      pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
+-              } else {
+-                      msg->address_hi = 0;
+-                      pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
+-              }
+-              msg->data = data;
+-      }
++      /* We do not touch the hardware (which may not even be
++       * accessible at the moment) but return the last message
++       * written.  Assert that this is valid, assuming that
++       * valid messages are not all-zeroes. */
++      BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
++               entry->msg.data));
++
++      *msg = entry->msg;
+ }
+ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
+@@ -231,7 +216,10 @@ void read_msi_msg(unsigned int irq, stru
+ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+ {
+       struct msi_desc *entry = get_irq_desc_msi(desc);
+-      if (entry->msi_attrib.is_msix) {
++
++      if (entry->dev->current_state != PCI_D0) {
++              /* Don't touch the hardware now */
++      } else if (entry->msi_attrib.is_msix) {
+               void __iomem *base;
+               base = entry->mask_base +
+                       entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
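The pattern the patch adopts is worth spelling out: the last message written is always cached in software, reads are served from that cache, and the hardware is only touched while the device is in D0. A toy sketch of that shadow-register pattern (illustration only, not kernel code; the struct and field names are made up):

#include <stdbool.h>
#include <stdint.h>

struct msg { uint32_t address_lo, address_hi, data; };

struct dev_shadow {
        bool in_d0;              /* true while the device is fully powered */
        struct msg cached;       /* last message software asked to write */
        volatile uint32_t *regs; /* pretend MSI-X entry registers */
};

void msg_write(struct dev_shadow *d, const struct msg *m)
{
        d->cached = *m;             /* always update the software copy */
        if (!d->in_d0)
                return;             /* registers unreachable: defer the write */
        d->regs[0] = m->address_lo; /* touch hardware only in D0 */
        d->regs[1] = m->address_hi;
        d->regs[2] = m->data;
}

void msg_read(const struct dev_shadow *d, struct msg *m)
{
        *m = d->cached;             /* no hardware access needed at all */
}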
diff --git a/queue-2.6.32/pci-msi-restore-read_msi_msg_desc-add-get_cached_msi_msg_desc.patch b/queue-2.6.32/pci-msi-restore-read_msi_msg_desc-add-get_cached_msi_msg_desc.patch
new file mode 100644 (file)
index 0000000..b421a8b
--- /dev/null
@@ -0,0 +1,148 @@
+From 30da55242818a8ca08583188ebcbaccd283ad4d9 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Fri, 23 Jul 2010 14:56:28 +0100
+Subject: PCI: MSI: Restore read_msi_msg_desc(); add get_cached_msi_msg_desc()
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+commit 30da55242818a8ca08583188ebcbaccd283ad4d9 upstream.
+
+commit 2ca1af9aa3285c6a5f103ed31ad09f7399fc65d7 "PCI: MSI: Remove
+unsafe and unnecessary hardware access" changed read_msi_msg_desc() to
+return the last MSI message written instead of reading it from the
+device, since it may be called while the device is in a reduced
+power state.
+
+However, the pSeries platform code really does need to read messages
+from the device, since they are initially written by firmware.
+Therefore:
+- Restore the previous behaviour of read_msi_msg_desc()
+- Add new functions get_cached_msi_msg{,_desc}() which return the
+  last MSI message written
+- Use the new functions where appropriate
+
+Acked-by: Michael Ellerman <michael@ellerman.id.au>
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/ia64/kernel/msi_ia64.c    |    2 -
+ arch/ia64/sn/kernel/msi_sn.c   |    2 -
+ arch/x86/kernel/apic/io_apic.c |    2 -
+ drivers/pci/msi.c              |   47 ++++++++++++++++++++++++++++++++++++-----
+ include/linux/msi.h            |    2 +
+ 5 files changed, 47 insertions(+), 8 deletions(-)
+
+--- a/arch/ia64/kernel/msi_ia64.c
++++ b/arch/ia64/kernel/msi_ia64.c
+@@ -25,7 +25,7 @@ static int ia64_set_msi_irq_affinity(uns
+       if (irq_prepare_move(irq, cpu))
+               return -1;
+-      read_msi_msg(irq, &msg);
++      get_cached_msi_msg(irq, &msg);
+       addr = msg.address_lo;
+       addr &= MSI_ADDR_DEST_ID_MASK;
+--- a/arch/ia64/sn/kernel/msi_sn.c
++++ b/arch/ia64/sn/kernel/msi_sn.c
+@@ -174,7 +174,7 @@ static int sn_set_msi_irq_affinity(unsig
+        * Release XIO resources for the old MSI PCI address
+        */
+-      read_msi_msg(irq, &msg);
++      get_cached_msi_msg(irq, &msg);
+         sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+       pdev = sn_pdev->pdi_linux_pcidev;
+       provider = SN_PCIDEV_BUSPROVIDER(pdev);
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -3338,7 +3338,7 @@ static int set_msi_irq_affinity(unsigned
+       cfg = desc->chip_data;
+-      read_msi_msg_desc(desc, &msg);
++      get_cached_msi_msg_desc(desc, &msg);
+       msg.data &= ~MSI_DATA_VECTOR_MASK;
+       msg.data |= MSI_DATA_VECTOR(cfg->vector);
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -196,9 +196,46 @@ void read_msi_msg_desc(struct irq_desc *
+ {
+       struct msi_desc *entry = get_irq_desc_msi(desc);
+-      /* We do not touch the hardware (which may not even be
+-       * accessible at the moment) but return the last message
+-       * written.  Assert that this is valid, assuming that
++      BUG_ON(entry->dev->current_state != PCI_D0);
++
++      if (entry->msi_attrib.is_msix) {
++              void __iomem *base = entry->mask_base +
++                      entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
++
++              msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
++              msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
++              msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
++      } else {
++              struct pci_dev *dev = entry->dev;
++              int pos = entry->msi_attrib.pos;
++              u16 data;
++
++              pci_read_config_dword(dev, msi_lower_address_reg(pos),
++                                      &msg->address_lo);
++              if (entry->msi_attrib.is_64) {
++                      pci_read_config_dword(dev, msi_upper_address_reg(pos),
++                                              &msg->address_hi);
++                      pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
++              } else {
++                      msg->address_hi = 0;
++                      pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
++              }
++              msg->data = data;
++      }
++}
++
++void read_msi_msg(unsigned int irq, struct msi_msg *msg)
++{
++      struct irq_desc *desc = irq_to_desc(irq);
++
++      read_msi_msg_desc(desc, msg);
++}
++
++void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
++{
++      struct msi_desc *entry = get_irq_desc_msi(desc);
++
++      /* Assert that the cache is valid, assuming that
+        * valid messages are not all-zeroes. */
+       BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
+                entry->msg.data));
+@@ -206,11 +243,11 @@ void read_msi_msg_desc(struct irq_desc *
+       *msg = entry->msg;
+ }
+-void read_msi_msg(unsigned int irq, struct msi_msg *msg)
++void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
+ {
+       struct irq_desc *desc = irq_to_desc(irq);
+-      read_msi_msg_desc(desc, msg);
++      get_cached_msi_msg_desc(desc, msg);
+ }
+ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -14,8 +14,10 @@ struct irq_desc;
+ extern void mask_msi_irq(unsigned int irq);
+ extern void unmask_msi_irq(unsigned int irq);
+ extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
++extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+ extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+ extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
++extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+ extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
+ struct msi_desc {
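For context, the callers converted above (io_apic.c, msi_ia64.c, msi_sn.c) all follow the same retargeting flow: fetch the cached message, edit the vector/destination fields, and write the result back. A simplified sketch of that flow, modelled on the io_apic.c hunk (not a complete kernel function; the msidef.h macros are assumed from the x86 tree of that era):

#include <linux/irq.h>
#include <linux/msi.h>
#include <asm/msidef.h>

/* Simplified: retarget an already-programmed MSI to a new vector/APIC dest. */
static void retarget_msi(struct irq_desc *desc, u8 vector, u32 dest)
{
        struct msi_msg msg;

        get_cached_msi_msg_desc(desc, &msg);       /* last message written */

        msg.data &= ~MSI_DATA_VECTOR_MASK;         /* swap in the new vector */
        msg.data |= MSI_DATA_VECTOR(vector);
        msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;  /* and the new destination */
        msg.address_lo |= MSI_ADDR_DEST_ID(dest);

        write_msi_msg_desc(desc, &msg);            /* program the device */
}

Because the cache is guaranteed valid here (it was written when the IRQ was first set up), no device access is needed until the final write, which is exactly why the affinity paths can use the cached variant.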
diff --git a/queue-2.6.32/sata_mv-fix-broken-dsm-trim-support-v2.patch b/queue-2.6.32/sata_mv-fix-broken-dsm-trim-support-v2.patch
new file mode 100644 (file)
index 0000000..c9ade45
--- /dev/null
@@ -0,0 +1,117 @@
+From 44b733809a5aba7f6b15a548d31a56d25bf3851c Mon Sep 17 00:00:00 2001
+From: Mark Lord <kernel@teksavvy.com>
+Date: Thu, 19 Aug 2010 21:40:44 -0400
+Subject: sata_mv: fix broken DSM/TRIM support (v2)
+
+From: Mark Lord <kernel@teksavvy.com>
+
+commit 44b733809a5aba7f6b15a548d31a56d25bf3851c upstream.
+
+Fix DSM/TRIM commands in sata_mv (v2).
+These need to be issued using old-school "BM DMA",
+rather than via the EDMA host queue.
+
+Since the chips don't have proper BM DMA status,
+we need to be more careful with setting the ATA_DMA_INTR bit,
+since DSM/TRIM often has a long delay between "DMA complete"
+and "command complete".
+
+GEN_I chips don't have BM DMA, so no TRIM for them.
+
+Signed-off-by: Mark Lord <mlord@pobox.com>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/sata_mv.c |   44 +++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 37 insertions(+), 7 deletions(-)
+
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -1879,19 +1879,25 @@ static void mv_bmdma_start(struct ata_qu
+  *    LOCKING:
+  *    Inherited from caller.
+  */
+-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
++static void mv_bmdma_stop_ap(struct ata_port *ap)
+ {
+-      struct ata_port *ap = qc->ap;
+       void __iomem *port_mmio = mv_ap_base(ap);
+       u32 cmd;
+       /* clear start/stop bit */
+       cmd = readl(port_mmio + BMDMA_CMD);
+-      cmd &= ~ATA_DMA_START;
+-      writelfl(cmd, port_mmio + BMDMA_CMD);
++      if (cmd & ATA_DMA_START) {
++              cmd &= ~ATA_DMA_START;
++              writelfl(cmd, port_mmio + BMDMA_CMD);
+-      /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+-      ata_sff_dma_pause(ap);
++              /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
++              ata_sff_dma_pause(ap);
++      }
++}
++
++static void mv_bmdma_stop(struct ata_queued_cmd *qc)
++{
++      mv_bmdma_stop_ap(qc->ap);
+ }
+ /**
+@@ -1915,8 +1921,21 @@ static u8 mv_bmdma_status(struct ata_por
+       reg = readl(port_mmio + BMDMA_STATUS);
+       if (reg & ATA_DMA_ACTIVE)
+               status = ATA_DMA_ACTIVE;
+-      else
++      else if (reg & ATA_DMA_ERR)
+               status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
++      else {
++              /*
++               * Just because DMA_ACTIVE is 0 (DMA completed),
++               * this does _not_ mean the device is "done".
++               * So we should not yet be signalling ATA_DMA_INTR
++               * in some cases.  Eg. DSM/TRIM, and perhaps others.
++               */
++              mv_bmdma_stop_ap(ap);
++              if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
++                      status = 0;
++              else
++                      status = ATA_DMA_INTR;
++      }
+       return status;
+ }
+@@ -1976,6 +1995,9 @@ static void mv_qc_prep(struct ata_queued
+       switch (tf->protocol) {
+       case ATA_PROT_DMA:
++              if (tf->command == ATA_CMD_DSM)
++                      return;
++              /* fall-thru */
+       case ATA_PROT_NCQ:
+               break;  /* continue below */
+       case ATA_PROT_PIO:
+@@ -2075,6 +2097,8 @@ static void mv_qc_prep_iie(struct ata_qu
+       if ((tf->protocol != ATA_PROT_DMA) &&
+           (tf->protocol != ATA_PROT_NCQ))
+               return;
++      if (tf->command == ATA_CMD_DSM)
++              return;  /* use bmdma for this */
+       /* Fill in Gen IIE command request block */
+       if (!(tf->flags & ATA_TFLAG_WRITE))
+@@ -2270,6 +2294,12 @@ static unsigned int mv_qc_issue(struct a
+       switch (qc->tf.protocol) {
+       case ATA_PROT_DMA:
++              if (qc->tf.command == ATA_CMD_DSM) {
++                      if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
++                              return AC_ERR_OTHER;
++                      break;  /* use bmdma for this */
++              }
++              /* fall thru */
+       case ATA_PROT_NCQ:
+               mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
+               pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
diff --git a/queue-2.6.32/x86-tsc-sched-recompute-cyc2ns_offset-s-during-resume-from-sleep-states.patch b/queue-2.6.32/x86-tsc-sched-recompute-cyc2ns_offset-s-during-resume-from-sleep-states.patch
new file mode 100644 (file)
index 0000000..c0d1e86
--- /dev/null
@@ -0,0 +1,115 @@
+From cd7240c0b900eb6d690ccee088a6c9b46dae815a Mon Sep 17 00:00:00 2001
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+Date: Thu, 19 Aug 2010 17:03:38 -0700
+Subject: x86, tsc, sched: Recompute cyc2ns_offset's during resume from sleep states
+
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+
+commit cd7240c0b900eb6d690ccee088a6c9b46dae815a upstream.
+
+TSC's get reset after suspend/resume (even on cpu's with invariant TSC
+which runs at a constant rate across ACPI P-, C- and T-states). And in
+some systems BIOS seem to reinit TSC to arbitrary large value (still
+sync'd across cpu's) during resume.
+
+This leads to a scenario of scheduler rq->clock (sched_clock_cpu()) less
+than rq->age_stamp (introduced in 2.6.32). This leads to a big value
+returned by scale_rt_power() and the resulting big group power set by the
+update_group_power() is causing improper load balancing between busy and
+idle cpu's after suspend/resume.
+
+This resulted in multi-threaded workloads (like kernel-compilation) go
+slower after suspend/resume cycle on core i5 laptops.
+
+Fix this by recomputing cyc2ns_offset's during resume, so that
+sched_clock() continues from the point where it was left off during
+suspend.
+
+Reported-by: Florian Pritz <flo@xssn.at>
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <1282262618.2675.24.camel@sbsiddha-MOBL3.sc.intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/tsc.h |    2 ++
+ arch/x86/kernel/tsc.c      |   38 ++++++++++++++++++++++++++++++++++++++
+ arch/x86/power/cpu.c       |    2 ++
+ 3 files changed, 42 insertions(+)
+
+--- a/arch/x86/include/asm/tsc.h
++++ b/arch/x86/include/asm/tsc.h
+@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cp
+ extern void check_tsc_sync_target(void);
+ extern int notsc_setup(char *);
++extern void save_sched_clock_state(void);
++extern void restore_sched_clock_state(void);
+ #endif /* _ASM_X86_TSC_H */
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned lo
+       local_irq_restore(flags);
+ }
++static unsigned long long cyc2ns_suspend;
++
++void save_sched_clock_state(void)
++{
++      if (!sched_clock_stable)
++              return;
++
++      cyc2ns_suspend = sched_clock();
++}
++
++/*
++ * Even on processors with invariant TSC, TSC gets reset in some the
++ * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to
++ * arbitrary value (still sync'd across cpu's) during resume from such sleep
++ * states. To cope up with this, recompute the cyc2ns_offset for each cpu so
++ * that sched_clock() continues from the point where it was left off during
++ * suspend.
++ */
++void restore_sched_clock_state(void)
++{
++      unsigned long long offset;
++      unsigned long flags;
++      int cpu;
++
++      if (!sched_clock_stable)
++              return;
++
++      local_irq_save(flags);
++
++      get_cpu_var(cyc2ns_offset) = 0;
++      offset = cyc2ns_suspend - sched_clock();
++
++      for_each_possible_cpu(cpu)
++              per_cpu(cyc2ns_offset, cpu) = offset;
++
++      local_irq_restore(flags);
++}
++
+ #ifdef CONFIG_CPU_FREQ
+ /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -112,6 +112,7 @@ static void __save_processor_state(struc
+ void save_processor_state(void)
+ {
+       __save_processor_state(&saved_context);
++      save_sched_clock_state();
+ }
+ #ifdef CONFIG_X86_32
+ EXPORT_SYMBOL(save_processor_state);
+@@ -253,6 +254,7 @@ static void __restore_processor_state(st
+ void restore_processor_state(void)
+ {
+       __restore_processor_state(&saved_context);
++      restore_sched_clock_state();
+ }
+ #ifdef CONFIG_X86_32
+ EXPORT_SYMBOL(restore_processor_state);
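The fix-up is simple arithmetic: sched_clock() is roughly (tsc * cyc2ns_scale >> shift) + cyc2ns_offset, so choosing the new offset as the suspend-time reading minus the raw post-resume value makes the clock continue instead of jumping. A toy, standalone illustration of that calculation (plain userspace C, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SHIFT 10

static uint64_t cyc2ns(uint64_t tsc, uint64_t scale, int64_t offset)
{
        return ((tsc * scale) >> CYC2NS_SHIFT) + offset;
}

int main(void)
{
        uint64_t scale = 1024;   /* 1 cycle == 1 ns, for simplicity */
        int64_t offset = 0;

        uint64_t ns_at_suspend = cyc2ns(5000000, scale, offset);

        /* After resume, firmware has reset the TSC to an arbitrary value. */
        uint64_t tsc_after_resume = 42;
        offset = (int64_t)ns_at_suspend - (int64_t)cyc2ns(tsc_after_resume, scale, 0);

        /* The clock now resumes from the suspend-time value instead of jumping. */
        printf("%llu -> %llu\n",
               (unsigned long long)ns_at_suspend,
               (unsigned long long)cyc2ns(tsc_after_resume, scale, offset));
        return 0;
}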
diff --git a/queue-2.6.32/xen-handle-events-as-edge-triggered.patch b/queue-2.6.32/xen-handle-events-as-edge-triggered.patch
new file mode 100644 (file)
index 0000000..6603dda
--- /dev/null
@@ -0,0 +1,44 @@
+From dffe2e1e1a1ddb566a76266136c312801c66dcf7 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Date: Fri, 20 Aug 2010 19:10:01 -0700
+Subject: xen: handle events as edge-triggered
+
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+
+commit dffe2e1e1a1ddb566a76266136c312801c66dcf7 upstream.
+
+Xen events are logically edge triggered, as Xen only calls the event
+upcall when an event is newly set, but not continuously as it remains set.
+As a result, use handle_edge_irq rather than handle_level_irq.
+
+This has the important side-effect of fixing a long-standing bug of
+events getting lost if:
+ - an event's interrupt handler is running
+ - the event is migrated to a different vcpu
+ - the event is re-triggered
+
+The most noticable symptom of these lost events is occasional lockups
+of blkfront.
+
+Many thanks to Tom Kopec and Daniel Stodden in tracking this down.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Cc: Tom Kopec <tek@acm.org>
+Cc: Daniel Stodden <daniel.stodden@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/xen/events.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -362,7 +362,7 @@ int bind_evtchn_to_irq(unsigned int evtc
+               irq = find_unbound_irq();
+               set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+-                                            handle_level_irq, "event");
++                                            handle_edge_irq, "event");
+               evtchn_to_irq[evtchn] = irq;
+               irq_info[irq] = mk_evtchn_info(evtchn);
diff --git a/queue-2.6.32/xen-use-percpu-interrupts-for-ipis-and-virqs.patch b/queue-2.6.32/xen-use-percpu-interrupts-for-ipis-and-virqs.patch
new file mode 100644 (file)
index 0000000..4f147b6
--- /dev/null
@@ -0,0 +1,73 @@
+From aaca49642b92c8a57d3ca5029a5a94019c7af69f Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Date: Fri, 20 Aug 2010 18:57:53 -0700
+Subject: xen: use percpu interrupts for IPIs and VIRQs
+
+From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+
+commit aaca49642b92c8a57d3ca5029a5a94019c7af69f upstream.
+
+IPIs and VIRQs are inherently per-cpu event types, so treat them as such:
+ - use a specific percpu irq_chip implementation, and
+ - handle them with handle_percpu_irq
+
+This makes the path for delivering these interrupts more efficient
+(no masking/unmasking, no locks), and it avoid problems with attempts
+to migrate them.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/xen/events.c |   19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -106,6 +106,7 @@ static inline unsigned long *cpu_evtchn_
+ #define VALID_EVTCHN(chn)     ((chn) != 0)
+ static struct irq_chip xen_dynamic_chip;
++static struct irq_chip xen_percpu_chip;
+ /* Constructor for packed IRQ information. */
+ static struct irq_info mk_unbound_info(void)
+@@ -388,8 +389,8 @@ static int bind_ipi_to_irq(unsigned int
+               if (irq < 0)
+                       goto out;
+-              set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+-                                            handle_level_irq, "ipi");
++              set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
++                                            handle_percpu_irq, "ipi");
+               bind_ipi.vcpu = cpu;
+               if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+@@ -429,8 +430,8 @@ static int bind_virq_to_irq(unsigned int
+               irq = find_unbound_irq();
+-              set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+-                                            handle_level_irq, "virq");
++              set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
++                                            handle_percpu_irq, "virq");
+               evtchn_to_irq[evtchn] = irq;
+               irq_info[irq] = mk_virq_info(evtchn, virq);
+@@ -929,6 +930,16 @@ static struct irq_chip xen_dynamic_chip
+       .retrigger      = retrigger_dynirq,
+ };
++static struct irq_chip xen_percpu_chip __read_mostly = {
++      .name           = "xen-percpu",
++
++      .disable        = disable_dynirq,
++      .mask           = disable_dynirq,
++      .unmask         = enable_dynirq,
++
++      .ack            = ack_dynirq,
++};
++
+ void __init xen_init_IRQ(void)
+ {
+       int i;