git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
.35 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Tue, 10 Aug 2010 22:47:20 +0000 (15:47 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Tue, 10 Aug 2010 22:47:20 +0000 (15:47 -0700)
queue-2.6.35/ata_piix-fix-locking-around-sidpr-access.patch [new file with mode: 0644]
queue-2.6.35/pcmcia-avoid-buffer-overflow-in-pcmcia_setup_isa_irq.patch [new file with mode: 0644]
queue-2.6.35/perf-powerpc-fsl_emb-restore-setting-perf_sample_data.period.patch [new file with mode: 0644]
queue-2.6.35/powerpc-fix-build-with-make-3.82.patch [new file with mode: 0644]
queue-2.6.35/x86-add-memory-modify-constraints-to-xchg-and-cmpxchg.patch [new file with mode: 0644]
queue-2.6.35/x86-kmmio-mmiotrace-fix-double-free-of-kmmio_fault_pages.patch [new file with mode: 0644]
queue-2.6.35/x86-pci-use-host-bridge-_crs-info-on-asrock-alivesata2-glan.patch [new file with mode: 0644]
queue-2.6.35/x86-vmware-preset-lpj-values-when-on-vmware.patch [new file with mode: 0644]

diff --git a/queue-2.6.35/ata_piix-fix-locking-around-sidpr-access.patch b/queue-2.6.35/ata_piix-fix-locking-around-sidpr-access.patch
new file mode 100644 (file)
index 0000000..9d41c76
--- /dev/null
@@ -0,0 +1,77 @@
+From 213373cf974fe69e78ec894b07f45ae2f5a3a078 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 20 Jul 2010 16:20:01 +0200
+Subject: ata_piix: fix locking around SIDPR access
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 213373cf974fe69e78ec894b07f45ae2f5a3a078 upstream.
+
+SIDPR window registers are shared across ports and, as each access is
+done in two steps, accesses to different ports under EH may race.
+This is primarily caused by incorrect host locking in EH context; the
+proper fix is to define locking requirements for each operation that
+can be used during EH and to enforce them. For now, work around the
+problem by adding a dedicated SIDPR lock and grabbing it for each
+SIDPR access.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Mark Knecht <markknecht@gmail.com>
+Reported-by: Paul Check <paul@thechecks.ca>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/ata_piix.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -158,6 +158,7 @@ struct piix_map_db {
+ struct piix_host_priv {
+       const int *map;
+       u32 saved_iocfg;
++      spinlock_t sidpr_lock;  /* FIXME: remove once locking in EH is fixed */
+       void __iomem *sidpr;
+ };
+@@ -951,12 +952,15 @@ static int piix_sidpr_scr_read(struct at
+                              unsigned int reg, u32 *val)
+ {
+       struct piix_host_priv *hpriv = link->ap->host->private_data;
++      unsigned long flags;
+       if (reg >= ARRAY_SIZE(piix_sidx_map))
+               return -EINVAL;
++      spin_lock_irqsave(&hpriv->sidpr_lock, flags);
+       piix_sidpr_sel(link, reg);
+       *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
++      spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
+       return 0;
+ }
+@@ -964,12 +968,15 @@ static int piix_sidpr_scr_write(struct a
+                               unsigned int reg, u32 val)
+ {
+       struct piix_host_priv *hpriv = link->ap->host->private_data;
++      unsigned long flags;
+       if (reg >= ARRAY_SIZE(piix_sidx_map))
+               return -EINVAL;
++      spin_lock_irqsave(&hpriv->sidpr_lock, flags);
+       piix_sidpr_sel(link, reg);
+       iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
++      spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
+       return 0;
+ }
+@@ -1566,6 +1573,7 @@ static int __devinit piix_init_one(struc
+       hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+       if (!hpriv)
+               return -ENOMEM;
++      spin_lock_init(&hpriv->sidpr_lock);
+       /* Save IOCFG, this will be used for cable detection, quirk
+        * detection and restoration on detach.  This is necessary
diff --git a/queue-2.6.35/pcmcia-avoid-buffer-overflow-in-pcmcia_setup_isa_irq.patch b/queue-2.6.35/pcmcia-avoid-buffer-overflow-in-pcmcia_setup_isa_irq.patch
new file mode 100644 (file)
index 0000000..07067a3
--- /dev/null
@@ -0,0 +1,57 @@
+From 127c03cdbad9bd5af5d7f33bd31a1015a90cb77f Mon Sep 17 00:00:00 2001
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+Date: Tue, 3 Aug 2010 09:33:45 +0200
+Subject: pcmcia: avoid buffer overflow in pcmcia_setup_isa_irq
+
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+
+commit 127c03cdbad9bd5af5d7f33bd31a1015a90cb77f upstream.
+
+NR_IRQS may be as low as 16, causing a (harmless?) buffer overflow in
+pcmcia_setup_isa_irq():
+
+static u8 pcmcia_used_irq[NR_IRQS];
+
+...
+
+               if ((try < 32) && pcmcia_used_irq[irq])
+                       continue;
+
+This is read-only, so if this address were non-zero, it would just
+mean we would not attempt an IRQ >= NR_IRQS -- which would fail anyway!
+And since request_irq() fails for an irq >= NR_IRQS, the setting code path:
+
+                       pcmcia_used_irq[irq]++;
+
+is never reached either.
+
+Reported-by: Christoph Fritz <chf.fritz@googlemail.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Christoph Fritz <chf.fritz@googlemail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pcmcia/pcmcia_resource.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/pcmcia/pcmcia_resource.c
++++ b/drivers/pcmcia/pcmcia_resource.c
+@@ -651,7 +651,7 @@ EXPORT_SYMBOL(__pcmcia_request_exclusive
+ #ifdef CONFIG_PCMCIA_PROBE
+ /* mask of IRQs already reserved by other cards, we should avoid using them */
+-static u8 pcmcia_used_irq[NR_IRQS];
++static u8 pcmcia_used_irq[32];
+ static irqreturn_t test_action(int cpl, void *dev_id)
+ {
+@@ -674,6 +674,9 @@ static int pcmcia_setup_isa_irq(struct p
+       for (try = 0; try < 64; try++) {
+               irq = try % 32;
++              if (irq > NR_IRQS)
++                      continue;
++
+               /* marked as available by driver, not blocked by userspace? */
+               if (!((mask >> irq) & 1))
+                       continue;
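
The shape of the bug and of the fix, reduced to a stand-alone sketch: the scan always indexes with try % 32, so the array must be sized 32 regardless of NR_IRQS, and indices beyond the valid IRQ range are skipped. This uses the strict >= form of the check, and the NR_IRQS value here is an assumed stand-in:

#define NR_IRQS 16			/* may legitimately be smaller than 32 */

static unsigned char used_irq[32];	/* sized for the full scan range, not NR_IRQS */

void scan_isa_irqs(unsigned int mask)
{
	int try, irq;

	for (try = 0; try < 64; try++) {
		irq = try % 32;
		if (irq >= NR_IRQS)	/* never index past the valid IRQ range */
			continue;
		if (!((mask >> irq) & 1))
			continue;
		if ((try < 32) && used_irq[irq])
			continue;
		/* ... attempt request_irq(irq) and record used_irq[irq] here ... */
	}
}
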
diff --git a/queue-2.6.35/perf-powerpc-fsl_emb-restore-setting-perf_sample_data.period.patch b/queue-2.6.35/perf-powerpc-fsl_emb-restore-setting-perf_sample_data.period.patch
new file mode 100644 (file)
index 0000000..352ac6f
--- /dev/null
@@ -0,0 +1,31 @@
+From 69e77a8b0426ded5d924eea7dbe4eca51e09f530 Mon Sep 17 00:00:00 2001
+From: Scott Wood <scottwood@freescale.com>
+Date: Mon, 2 Aug 2010 17:17:18 -0500
+Subject: perf, powerpc: fsl_emb: Restore setting perf_sample_data.period
+
+From: Scott Wood <scottwood@freescale.com>
+
+commit 69e77a8b0426ded5d924eea7dbe4eca51e09f530 upstream.
+
+Commit 6b95ed345b9faa4ab3598a82991968f2e9f851bb switched from
+a struct initializer to perf_sample_data_init(), but the setting
+of the .period member was left out.
+
+Signed-off-by: Scott Wood <scottwood@freescale.com>
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/kernel/perf_event_fsl_emb.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
++++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
+@@ -569,6 +569,7 @@ static void record_and_restart(struct pe
+               struct perf_sample_data data;
+               perf_sample_data_init(&data, 0);
++              data.period = event->hw.last_period;
+               if (perf_event_overflow(event, nmi, &data, regs)) {
+                       /*
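
The regression class is easy to reproduce outside the kernel: when a designated initializer is replaced by an init helper, any field the helper does not cover silently falls back to the helper's default. A hedged sketch with illustrative names, not the kernel's perf structures:

#include <stdint.h>

struct sample_data {
	uint64_t addr;
	uint64_t period;
};

static void sample_data_init(struct sample_data *data, uint64_t addr)
{
	data->addr = addr;
	data->period = 0;		/* helper default, not the event's period */
}

void record_sample(uint64_t last_period)
{
	struct sample_data data;

	sample_data_init(&data, 0);
	data.period = last_period;	/* the assignment this patch restores */
}
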
diff --git a/queue-2.6.35/powerpc-fix-build-with-make-3.82.patch b/queue-2.6.35/powerpc-fix-build-with-make-3.82.patch
new file mode 100644 (file)
index 0000000..e8225f0
--- /dev/null
@@ -0,0 +1,62 @@
+From e32e78c5ee8aadef020fbaecbe6fb741ed9029fd Mon Sep 17 00:00:00 2001
+From: Sam Ravnborg <sam@ravnborg.org>
+Date: Mon, 2 Aug 2010 20:47:48 +0000
+Subject: powerpc: fix build with make 3.82
+
+From: Sam Ravnborg <sam@ravnborg.org>
+
+commit e32e78c5ee8aadef020fbaecbe6fb741ed9029fd upstream.
+
+Thomas Backlund reported that the powerpc build broke with make 3.82.
+It failed with the following message:
+
+    arch/powerpc/Makefile:183: *** mixed implicit and normal rules.  Stop.
+
+The fix is to avoid mixing non-wildcard and wildcard targets.
+
+Reported-by: Thomas Backlund <tmb@mandriva.org>
+Tested-by: Thomas Backlund <tmb@mandriva.org>
+Cc: Michal Marek <mmarek@suse.cz>
+Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/Makefile |   16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -163,9 +163,11 @@ drivers-$(CONFIG_OPROFILE)        += arch/power
+ # Default to zImage, override when needed
+ all: zImage
+-BOOT_TARGETS = zImage zImage.initrd uImage zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
++# With make 3.82 we cannot mix normal and wildcard targets
++BOOT_TARGETS1 := zImage zImage.initrd uImaged
++BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
+-PHONY += $(BOOT_TARGETS)
++PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
+ boot := arch/$(ARCH)/boot
+@@ -180,10 +182,16 @@ relocs_check: arch/powerpc/relocs_check.
+ zImage: relocs_check
+ endif
+-$(BOOT_TARGETS): vmlinux
++$(BOOT_TARGETS1): vmlinux
++      $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
++$(BOOT_TARGETS2): vmlinux
++      $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
++
++
++bootwrapper_install:
+       $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+-bootwrapper_install %.dtb:
++%.dtb:
+       $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ define archhelp
diff --git a/queue-2.6.35/x86-add-memory-modify-constraints-to-xchg-and-cmpxchg.patch b/queue-2.6.35/x86-add-memory-modify-constraints-to-xchg-and-cmpxchg.patch
new file mode 100644 (file)
index 0000000..d294d4f
--- /dev/null
@@ -0,0 +1,227 @@
+From 113fc5a6e8c2288619ff7e8187a6f556b7e0d372 Mon Sep 17 00:00:00 2001
+From: H. Peter Anvin <hpa@zytor.com>
+Date: Tue, 27 Jul 2010 17:01:49 -0700
+Subject: x86: Add memory modify constraints to xchg() and cmpxchg()
+
+From: H. Peter Anvin <hpa@zytor.com>
+
+commit 113fc5a6e8c2288619ff7e8187a6f556b7e0d372 upstream.
+
+xchg() and cmpxchg() modify their memory operands, not merely read
+them.  For some versions of gcc the "memory" clobber has apparently
+dealt with the situation, but not for all.
+
+Originally-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Cc: Glauber Costa <glommer@redhat.com>
+Cc: Avi Kivity <avi@redhat.com>
+Cc: Peter Palfrader <peter@palfrader.org>
+Cc: Greg KH <gregkh@suse.de>
+Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Cc: Zachary Amsden <zamsden@redhat.com>
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+LKML-Reference: <4C4F7277.8050306@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/cmpxchg_32.h |   68 +++++++++++++++++++-------------------
+ arch/x86/include/asm/cmpxchg_64.h |   40 +++++++++++-----------
+ 2 files changed, 54 insertions(+), 54 deletions(-)
+
+--- a/arch/x86/include/asm/cmpxchg_32.h
++++ b/arch/x86/include/asm/cmpxchg_32.h
+@@ -27,20 +27,20 @@ struct __xchg_dummy {
+       switch (size) {                                                 \
+       case 1:                                                         \
+               asm volatile("xchgb %b0,%1"                             \
+-                           : "=q" (__x)                               \
+-                           : "m" (*__xg(ptr)), "0" (__x)              \
++                           : "=q" (__x), "+m" (*__xg(ptr))            \
++                           : "0" (__x)                                \
+                            : "memory");                               \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile("xchgw %w0,%1"                             \
+-                           : "=r" (__x)                               \
+-                           : "m" (*__xg(ptr)), "0" (__x)              \
++                           : "=r" (__x), "+m" (*__xg(ptr))            \
++                           : "0" (__x)                                \
+                            : "memory");                               \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile("xchgl %0,%1"                              \
+-                           : "=r" (__x)                               \
+-                           : "m" (*__xg(ptr)), "0" (__x)              \
++                           : "=r" (__x), "+m" (*__xg(ptr))            \
++                           : "0" (__x)                                \
+                            : "memory");                               \
+               break;                                                  \
+       default:                                                        \
+@@ -70,14 +70,14 @@ static inline void __set_64bit(unsigned
+                              unsigned int low, unsigned int high)
+ {
+       asm volatile("\n1:\t"
+-                   "movl (%0), %%eax\n\t"
+-                   "movl 4(%0), %%edx\n\t"
+-                   LOCK_PREFIX "cmpxchg8b (%0)\n\t"
++                   "movl (%1), %%eax\n\t"
++                   "movl 4(%1), %%edx\n\t"
++                   LOCK_PREFIX "cmpxchg8b (%1)\n\t"
+                    "jnz 1b"
+-                   : /* no outputs */
+-                   : "D"(ptr),
+-                     "b"(low),
+-                     "c"(high)
++                   : "=m" (*ptr)
++                   : "D" (ptr),
++                     "b" (low),
++                     "c" (high)
+                    : "ax", "dx", "memory");
+ }
+@@ -121,21 +121,21 @@ extern void __cmpxchg_wrong_size(void);
+       __typeof__(*(ptr)) __new = (new);                               \
+       switch (size) {                                                 \
+       case 1:                                                         \
+-              asm volatile(lock "cmpxchgb %b1,%2"                     \
+-                           : "=a"(__ret)                              \
+-                           : "q"(__new), "m"(*__xg(ptr)), "0"(__old)  \
++              asm volatile(lock "cmpxchgb %b2,%1"                     \
++                           : "=a" (__ret), "+m" (*__xg(ptr))          \
++                           : "q" (__new), "0" (__old)                 \
+                            : "memory");                               \
+               break;                                                  \
+       case 2:                                                         \
+-              asm volatile(lock "cmpxchgw %w1,%2"                     \
+-                           : "=a"(__ret)                              \
+-                           : "r"(__new), "m"(*__xg(ptr)), "0"(__old)  \
++              asm volatile(lock "cmpxchgw %w2,%1"                     \
++                           : "=a" (__ret), "+m" (*__xg(ptr))          \
++                           : "r" (__new), "0" (__old)                 \
+                            : "memory");                               \
+               break;                                                  \
+       case 4:                                                         \
+-              asm volatile(lock "cmpxchgl %1,%2"                      \
+-                           : "=a"(__ret)                              \
+-                           : "r"(__new), "m"(*__xg(ptr)), "0"(__old)  \
++              asm volatile(lock "cmpxchgl %2,%1"                      \
++                           : "=a" (__ret), "+m" (*__xg(ptr))          \
++                           : "r" (__new), "0" (__old)                 \
+                            : "memory");                               \
+               break;                                                  \
+       default:                                                        \
+@@ -180,12 +180,12 @@ static inline unsigned long long __cmpxc
+                                            unsigned long long new)
+ {
+       unsigned long long prev;
+-      asm volatile(LOCK_PREFIX "cmpxchg8b %3"
+-                   : "=A"(prev)
+-                   : "b"((unsigned long)new),
+-                     "c"((unsigned long)(new >> 32)),
+-                     "m"(*__xg(ptr)),
+-                     "0"(old)
++      asm volatile(LOCK_PREFIX "cmpxchg8b %1"
++                   : "=A" (prev),
++                     "+m" (*__xg(ptr))
++                   : "b" ((unsigned long)new),
++                     "c" ((unsigned long)(new >> 32)),
++                     "0" (old)
+                    : "memory");
+       return prev;
+ }
+@@ -195,12 +195,12 @@ static inline unsigned long long __cmpxc
+                                                  unsigned long long new)
+ {
+       unsigned long long prev;
+-      asm volatile("cmpxchg8b %3"
+-                   : "=A"(prev)
+-                   : "b"((unsigned long)new),
+-                     "c"((unsigned long)(new >> 32)),
+-                     "m"(*__xg(ptr)),
+-                     "0"(old)
++      asm volatile("cmpxchg8b %1"
++                   : "=A" (prev),
++                     "+m" (*__xg(ptr))
++                   : "b" ((unsigned long)new),
++                     "c" ((unsigned long)(new >> 32)),
++                     "0" (old)
+                    : "memory");
+       return prev;
+ }
+--- a/arch/x86/include/asm/cmpxchg_64.h
++++ b/arch/x86/include/asm/cmpxchg_64.h
+@@ -26,26 +26,26 @@ extern void __cmpxchg_wrong_size(void);
+       switch (size) {                                                 \
+       case 1:                                                         \
+               asm volatile("xchgb %b0,%1"                             \
+-                           : "=q" (__x)                               \
+-                           : "m" (*__xg(ptr)), "0" (__x)              \
++                           : "=q" (__x), "+m" (*__xg(ptr))            \
++                           : "0" (__x)                                \
+                            : "memory");                               \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile("xchgw %w0,%1"                             \
+-                           : "=r" (__x)                               \
+-                           : "m" (*__xg(ptr)), "0" (__x)              \
++                           : "=r" (__x), "+m" (*__xg(ptr))            \
++                           : "0" (__x)                                \
+                            : "memory");                               \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile("xchgl %k0,%1"                             \
+-                           : "=r" (__x)                               \
+-                           : "m" (*__xg(ptr)), "0" (__x)              \
++                           : "=r" (__x), "+m" (*__xg(ptr))            \
++                           : "0" (__x)                                \
+                            : "memory");                               \
+               break;                                                  \
+       case 8:                                                         \
+               asm volatile("xchgq %0,%1"                              \
+-                           : "=r" (__x)                               \
+-                           : "m" (*__xg(ptr)), "0" (__x)              \
++                           : "=r" (__x), "+m" (*__xg(ptr))            \
++                           : "0" (__x)                                \
+                            : "memory");                               \
+               break;                                                  \
+       default:                                                        \
+@@ -71,27 +71,27 @@ extern void __cmpxchg_wrong_size(void);
+       __typeof__(*(ptr)) __new = (new);                               \
+       switch (size) {                                                 \
+       case 1:                                                         \
+-              asm volatile(lock "cmpxchgb %b1,%2"                     \
+-                           : "=a"(__ret)                              \
+-                           : "q"(__new), "m"(*__xg(ptr)), "0"(__old)  \
++              asm volatile(lock "cmpxchgb %b2,%1"                     \
++                           : "=a" (__ret), "+m" (*__xg(ptr))          \
++                           : "q" (__new), "0" (__old)                 \
+                            : "memory");                               \
+               break;                                                  \
+       case 2:                                                         \
+-              asm volatile(lock "cmpxchgw %w1,%2"                     \
+-                           : "=a"(__ret)                              \
+-                           : "r"(__new), "m"(*__xg(ptr)), "0"(__old)  \
++              asm volatile(lock "cmpxchgw %w2,%1"                     \
++                           : "=a" (__ret), "+m" (*__xg(ptr))          \
++                           : "r" (__new), "0" (__old)                 \
+                            : "memory");                               \
+               break;                                                  \
+       case 4:                                                         \
+-              asm volatile(lock "cmpxchgl %k1,%2"                     \
+-                           : "=a"(__ret)                              \
+-                           : "r"(__new), "m"(*__xg(ptr)), "0"(__old)  \
++              asm volatile(lock "cmpxchgl %k2,%1"                     \
++                           : "=a" (__ret), "+m" (*__xg(ptr))          \
++                           : "r" (__new), "0" (__old)                 \
+                            : "memory");                               \
+               break;                                                  \
+       case 8:                                                         \
+-              asm volatile(lock "cmpxchgq %1,%2"                      \
+-                           : "=a"(__ret)                              \
+-                           : "r"(__new), "m"(*__xg(ptr)), "0"(__old)  \
++              asm volatile(lock "cmpxchgq %2,%1"                      \
++                           : "=a" (__ret), "+m" (*__xg(ptr))          \
++                           : "r" (__new), "0" (__old)                 \
+                            : "memory");                               \
+               break;                                                  \
+       default:                                                        \
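
The distinction the patch draws: with a plain "m" input plus a "memory" clobber, some gcc versions may still assume that particular operand is only read, because nothing in the constraints says it is written; putting it in the output list as "+m" states the read-modify-write explicitly. A self-contained x86-64 sketch (gcc/clang), with my_xchg as an illustrative name rather than the kernel macro:

#include <stdio.h>

static inline unsigned long my_xchg(unsigned long *ptr, unsigned long val)
{
	asm volatile("xchgq %0,%1"
		     : "=r" (val), "+m" (*ptr)	/* "+m": operand is read and written */
		     : "0" (val)
		     : "memory");
	return val;
}

int main(void)
{
	unsigned long v = 1;
	unsigned long old = my_xchg(&v, 2);

	printf("old=%lu new=%lu\n", old, v);	/* prints old=1 new=2 */
	return 0;
}
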
diff --git a/queue-2.6.35/x86-kmmio-mmiotrace-fix-double-free-of-kmmio_fault_pages.patch b/queue-2.6.35/x86-kmmio-mmiotrace-fix-double-free-of-kmmio_fault_pages.patch
new file mode 100644 (file)
index 0000000..fc370db
--- /dev/null
@@ -0,0 +1,122 @@
+From 8b8f79b927b6b302bb65fb8c56e7a19be5fbdbef Mon Sep 17 00:00:00 2001
+From: Marcin Slusarz <marcin.slusarz@gmail.com>
+Date: Sun, 13 Jun 2010 23:56:54 +0200
+Subject: x86, kmmio/mmiotrace: Fix double free of kmmio_fault_pages
+
+From: Marcin Slusarz <marcin.slusarz@gmail.com>
+
+commit 8b8f79b927b6b302bb65fb8c56e7a19be5fbdbef upstream.
+
+After every iounmap mmiotrace has to free kmmio_fault_pages, but
+it can't do so directly, so it defers the freeing via RCU.
+
+This usually works, but when mmiotraced code calls ioremap/iounmap
+multiple times without sleeping in between (so RCU won't kick in
+and start freeing), it can be given the same virtual address, so
+at every iounmap mmiotrace will schedule the same pages for
+release. Obviously it will explode on the second free.
+
+Fix it by marking kmmio_fault_pages which are scheduled for
+release and not adding them a second time.
+
+Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
+Tested-by: Marcin Kocielnicki <koriakin@0x04.net>
+Tested-by: Shinpei KATO <shinpei@il.is.s.u-tokyo.ac.jp>
+Acked-by: Pekka Paalanen <pq@iki.fi>
+Cc: Stuart Bennett <stuart@freedesktop.org>
+Cc: Marcin Kocielnicki <koriakin@0x04.net>
+Cc: nouveau@lists.freedesktop.org
+LKML-Reference: <20100613215654.GA3829@joi.lan>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/kmmio.c         |   16 +++++++++++++---
+ arch/x86/mm/testmmiotrace.c |   22 ++++++++++++++++++++++
+ 2 files changed, 35 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -45,6 +45,8 @@ struct kmmio_fault_page {
+        * Protected by kmmio_lock, when linked into kmmio_page_table.
+        */
+       int count;
++
++      bool scheduled_for_release;
+ };
+ struct kmmio_delayed_release {
+@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(uns
+       BUG_ON(f->count < 0);
+       if (!f->count) {
+               disarm_kmmio_fault_page(f);
+-              f->release_next = *release_list;
+-              *release_list = f;
++              if (!f->scheduled_for_release) {
++                      f->release_next = *release_list;
++                      *release_list = f;
++                      f->scheduled_for_release = true;
++              }
+       }
+ }
+@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(str
+                       prevp = &f->release_next;
+               } else {
+                       *prevp = f->release_next;
++                      f->release_next = NULL;
++                      f->scheduled_for_release = false;
+               }
+-              f = f->release_next;
++              f = *prevp;
+       }
+       spin_unlock_irqrestore(&kmmio_lock, flags);
+@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio
+       kmmio_count--;
+       spin_unlock_irqrestore(&kmmio_lock, flags);
++      if (!release_list)
++              return;
++
+       drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
+       if (!drelease) {
+               pr_crit("leaking kmmio_fault_page objects.\n");
+--- a/arch/x86/mm/testmmiotrace.c
++++ b/arch/x86/mm/testmmiotrace.c
+@@ -90,6 +90,27 @@ static void do_test(unsigned long size)
+       iounmap(p);
+ }
++/*
++ * Tests how mmiotrace behaves in face of multiple ioremap / iounmaps in
++ * a short time. We had a bug in deferred freeing procedure which tried
++ * to free this region multiple times (ioremap can reuse the same address
++ * for many mappings).
++ */
++static void do_test_bulk_ioremapping(void)
++{
++      void __iomem *p;
++      int i;
++
++      for (i = 0; i < 10; ++i) {
++              p = ioremap_nocache(mmio_address, PAGE_SIZE);
++              if (p)
++                      iounmap(p);
++      }
++
++      /* Force freeing. If it will crash we will know why. */
++      synchronize_rcu();
++}
++
+ static int __init init(void)
+ {
+       unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+@@ -104,6 +125,7 @@ static int __init init(void)
+                  "and writing 16 kB of rubbish in there.\n",
+                  size >> 10, mmio_address);
+       do_test(size);
++      do_test_bulk_ioremapping();
+       pr_info("All done.\n");
+       return 0;
+ }
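
The guard the patch introduces is a general pattern for deferred-release lists: an object that can be scheduled more than once before the worker runs carries a flag set on first enqueue. A minimal sketch with illustrative names; locking is elided here, whereas the kernel code holds kmmio_lock around both operations:

#include <stdbool.h>
#include <stddef.h>

struct fault_page {
	struct fault_page *release_next;
	bool scheduled_for_release;
};

void schedule_release(struct fault_page *f, struct fault_page **release_list)
{
	if (f->scheduled_for_release)
		return;				/* already queued: adding again would double-free */
	f->release_next = *release_list;
	*release_list = f;
	f->scheduled_for_release = true;
}

void cancel_release(struct fault_page *f)
{
	f->release_next = NULL;
	f->scheduled_for_release = false;	/* mirrors remove_kmmio_fault_pages() */
}
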
diff --git a/queue-2.6.35/x86-pci-use-host-bridge-_crs-info-on-asrock-alivesata2-glan.patch b/queue-2.6.35/x86-pci-use-host-bridge-_crs-info-on-asrock-alivesata2-glan.patch
new file mode 100644 (file)
index 0000000..0990e4e
--- /dev/null
@@ -0,0 +1,77 @@
+From 2491762cfb475dbdfa3db11ebea6de49f58b7fac Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas <bjorn.helgaas@hp.com>
+Date: Fri, 23 Jul 2010 12:53:27 -0600
+Subject: x86/PCI: use host bridge _CRS info on ASRock ALiveSATA2-GLAN
+
+From: Bjorn Helgaas <bjorn.helgaas@hp.com>
+
+commit 2491762cfb475dbdfa3db11ebea6de49f58b7fac upstream.
+
+This DMI quirk turns on "pci=use_crs" for the ALiveSATA2-GLAN because
+amd_bus.c doesn't handle this system correctly.
+
+The system has a single HyperTransport I/O chain, but has two PCI host
+bridges to buses 00 and 80.  amd_bus.c learns the MMIO range associated
+with buses 00-ff and that this range is routed to the HT chain hosted at
+node 0, link 0:
+
+    bus: [00, ff] on node 0 link 0
+    bus: 00 index 1 [mem 0x80000000-0xfcffffffff]
+
+This includes the address space for both bus 00 and bus 80, and amd_bus.c
+assumes it's all routed to bus 00.
+
+We find device 80:01.0, which BIOS left in the middle of that space, but
+we don't find a bridge from bus 00 to bus 80, so we conclude that 80:01.0
+is unreachable from bus 00, and we move it from the original, working,
+address to something outside the bus 00 aperture, which does not work:
+
+    pci 0000:80:01.0: reg 10: [mem 0xfebfc000-0xfebfffff 64bit]
+    pci 0000:80:01.0: BAR 0: assigned [mem 0xfd00000000-0xfd00003fff 64bit]
+
+The BIOS told us everything we need to know to handle this correctly,
+so we're better off if we just pay attention, which lets us leave the
+80:01.0 device at the original, working, address:
+
+    ACPI: PCI Root Bridge [PCI0] (domain 0000 [bus 00-7f])
+    pci_root PNP0A03:00: host bridge window [mem 0x80000000-0xff37ffff]
+    ACPI: PCI Root Bridge [PCI1] (domain 0000 [bus 80-ff])
+    pci_root PNP0A08:00: host bridge window [mem 0xfebfc000-0xfebfffff]
+
+This was a regression between 2.6.33 and 2.6.34.  In 2.6.33, amd_bus.c
+was used only when we found multiple HT chains.  3e3da00c01d050, which
+enabled amd_bus.c even on systems with a single HT chain, caused this
+failure.
+
+This quirk was written by Graham.  If we ever enable "pci=use_crs" for
+machines from 2006 or earlier, this quirk should be removed.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=16007
+
+Reported-by: Graham Ramsey <ramsey.graham@ntlworld.com>
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/pci/acpi.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -34,6 +34,15 @@ static const struct dmi_system_id pci_us
+                       DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
+               },
+       },
++      /* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
++      /* 2006 AMD HT/VIA system with two host bridges */
++        {
++              .callback = set_use_crs,
++              .ident = "ASRock ALiveSATA2-GLAN",
++              .matches = {
++                      DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
++                },
++        },
+       {}
+ };
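
The mechanism is the standard DMI quirk table: match the board's product string, run a callback that flips a flag before PCI enumeration. A user-space sketch of the same lookup, with stand-ins for the kernel's dmi_system_id machinery:

#include <stdbool.h>
#include <string.h>

static bool pci_use_crs;

struct dmi_quirk {
	const char *product;			/* value matched against DMI_PRODUCT_NAME */
	void (*callback)(void);
};

static void set_use_crs(void)
{
	pci_use_crs = true;
}

static const struct dmi_quirk quirks[] = {
	{ "ALiveSATA2-GLAN", set_use_crs },
	{ NULL, NULL }				/* table terminator, as in the kernel */
};

void apply_quirks(const char *product_name)
{
	const struct dmi_quirk *q;

	for (q = quirks; q->product; q++)
		if (strcmp(product_name, q->product) == 0)
			q->callback();
}
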
diff --git a/queue-2.6.35/x86-vmware-preset-lpj-values-when-on-vmware.patch b/queue-2.6.35/x86-vmware-preset-lpj-values-when-on-vmware.patch
new file mode 100644 (file)
index 0000000..b756f22
--- /dev/null
@@ -0,0 +1,51 @@
+From 9f242dc10e0c3c1eb32d8c83c18650a35fd7f80d Mon Sep 17 00:00:00 2001
+From: Alok Kataria <akataria@vmware.com>
+Date: Mon, 2 Aug 2010 16:10:37 -0700
+Subject: x86, vmware: Preset lpj values when on VMware.
+
+From: Alok Kataria <akataria@vmware.com>
+
+commit 9f242dc10e0c3c1eb32d8c83c18650a35fd7f80d upstream.
+
+When running on VMware's platform, we have seen situations where
+the APs try to calibrate the lpj values and fail to get good calibration
+runs because of timing issues. As a result, delays don't work correctly
+on all cpus.
+
+The solution is to set the preset_lpj value based on the current tsc
+frequency value. This is similar to what KVM does.
+
+Signed-off-by: Alok N Kataria <akataria@vmware.com>
+LKML-Reference: <1280790637.14933.29.camel@ank32.eng.vmware.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/vmware.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/vmware.c
++++ b/arch/x86/kernel/cpu/vmware.c
+@@ -51,7 +51,7 @@ static inline int __vmware_platform(void
+ static unsigned long vmware_get_tsc_khz(void)
+ {
+-      uint64_t tsc_hz;
++      uint64_t tsc_hz, lpj;
+       uint32_t eax, ebx, ecx, edx;
+       VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+@@ -62,6 +62,13 @@ static unsigned long vmware_get_tsc_khz(
+       printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
+                        (unsigned long) tsc_hz / 1000,
+                        (unsigned long) tsc_hz % 1000);
++
++      if (!preset_lpj) {
++              lpj = ((u64)tsc_hz * 1000);
++              do_div(lpj, HZ);
++              preset_lpj = lpj;
++      }
++
+       return tsc_hz;
+ }
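
The arithmetic being preset: loops-per-jiffy is TSC cycles per second divided by the tick rate, and (as the surrounding printk suggests) the function's tsc_hz variable holds kHz, hence the * 1000. A worked example assuming HZ=250 and a 2.4 GHz TSC; both values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t hz = 250;		/* assumed CONFIG_HZ */
	uint64_t tsc_khz = 2400000;		/* 2.4 GHz, as reported by the hypervisor */
	uint64_t lpj = tsc_khz * 1000 / hz;	/* cycles per second / ticks per second */

	printf("preset_lpj = %llu\n", (unsigned long long)lpj);	/* 9600000 */
	return 0;
}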