From: Greg Kroah-Hartman
Date: Tue, 10 Aug 2010 22:48:39 +0000 (-0700)
Subject: .34 patches
X-Git-Tag: v2.6.32.19~21
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=0617ea8efe0d277539126a44fdb0a87809d840ec;p=thirdparty%2Fkernel%2Fstable-queue.git

.34 patches
---

diff --git a/queue-2.6.34/ata_piix-fix-locking-around-sidpr-access.patch b/queue-2.6.34/ata_piix-fix-locking-around-sidpr-access.patch
new file mode 100644
index 00000000000..9d41c76a04b
--- /dev/null
+++ b/queue-2.6.34/ata_piix-fix-locking-around-sidpr-access.patch
@@ -0,0 +1,77 @@
+From 213373cf974fe69e78ec894b07f45ae2f5a3a078 Mon Sep 17 00:00:00 2001
+From: Tejun Heo
+Date: Tue, 20 Jul 2010 16:20:01 +0200
+Subject: ata_piix: fix locking around SIDPR access
+
+From: Tejun Heo
+
+commit 213373cf974fe69e78ec894b07f45ae2f5a3a078 upstream.
+
+SIDPR window registers are shared across ports, and as each access is
+done in two steps, accesses to different ports under EH may race.
+This is primarily caused by incorrect host locking in EH context and
+should be fixed by defining and enforcing locking requirements for
+each operation that can be used during EH, but for now, work around
+the problem by adding a dedicated SIDPR lock and grabbing it for each
+SIDPR access.
+
+Signed-off-by: Tejun Heo
+Reported-by: Mark Knecht
+Reported-by: Paul Check
+Signed-off-by: Jeff Garzik
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/ata/ata_piix.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -158,6 +158,7 @@ struct piix_map_db {
+ struct piix_host_priv {
+ 	const int *map;
+ 	u32 saved_iocfg;
++	spinlock_t sidpr_lock;	/* FIXME: remove once locking in EH is fixed */
+ 	void __iomem *sidpr;
+ };
+ 
+@@ -951,12 +952,15 @@ static int piix_sidpr_scr_read(struct at
+ 			       unsigned int reg, u32 *val)
+ {
+ 	struct piix_host_priv *hpriv = link->ap->host->private_data;
++	unsigned long flags;
+ 
+ 	if (reg >= ARRAY_SIZE(piix_sidx_map))
+ 		return -EINVAL;
+ 
++	spin_lock_irqsave(&hpriv->sidpr_lock, flags);
+ 	piix_sidpr_sel(link, reg);
+ 	*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
++	spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
+ 	return 0;
+ }
+ 
+@@ -964,12 +968,15 @@ static int piix_sidpr_scr_write(struct a
+ 				unsigned int reg, u32 val)
+ {
+ 	struct piix_host_priv *hpriv = link->ap->host->private_data;
++	unsigned long flags;
+ 
+ 	if (reg >= ARRAY_SIZE(piix_sidx_map))
+ 		return -EINVAL;
+ 
++	spin_lock_irqsave(&hpriv->sidpr_lock, flags);
+ 	piix_sidpr_sel(link, reg);
+ 	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
++	spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
+ 	return 0;
+ }
+ 
+@@ -1566,6 +1573,7 @@ static int __devinit piix_init_one(struc
+ 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ 	if (!hpriv)
+ 		return -ENOMEM;
++	spin_lock_init(&hpriv->sidpr_lock);
+ 
+ 	/* Save IOCFG, this will be used for cable detection, quirk
+ 	 * detection and restoration on detach. This is necessary
diff --git a/queue-2.6.34/powerpc-fix-build-with-make-3.82.patch b/queue-2.6.34/powerpc-fix-build-with-make-3.82.patch
new file mode 100644
index 00000000000..a15263a8b2b
--- /dev/null
+++ b/queue-2.6.34/powerpc-fix-build-with-make-3.82.patch
@@ -0,0 +1,62 @@
+From e32e78c5ee8aadef020fbaecbe6fb741ed9029fd Mon Sep 17 00:00:00 2001
+From: Sam Ravnborg
+Date: Mon, 2 Aug 2010 20:47:48 +0000
+Subject: powerpc: fix build with make 3.82
+
+From: Sam Ravnborg
+
+commit e32e78c5ee8aadef020fbaecbe6fb741ed9029fd upstream.
+
+Thomas Backlund reported that the powerpc build broke with make 3.82.
+It failed with the following message:
+
+   arch/powerpc/Makefile:183: *** mixed implicit and normal rules. Stop.
+
+The fix is to avoid mixing non-wildcard and wildcard targets.
+
+Reported-by: Thomas Backlund
+Tested-by: Thomas Backlund
+Cc: Michal Marek
+Signed-off-by: Sam Ravnborg
+Signed-off-by: Benjamin Herrenschmidt
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/powerpc/Makefile | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -158,9 +158,11 @@ drivers-$(CONFIG_OPROFILE) += arch/power
+ # Default to zImage, override when needed
+ all: zImage
+ 
+-BOOT_TARGETS = zImage zImage.initrd uImage zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
++# With make 3.82 we cannot mix normal and wildcard targets
++BOOT_TARGETS1 := zImage zImage.initrd uImage
++BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
+ 
+-PHONY += $(BOOT_TARGETS)
++PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
+ 
+ boot := arch/$(ARCH)/boot
+ 
+@@ -175,10 +177,16 @@ relocs_check: arch/powerpc/relocs_check.
+ zImage: relocs_check
+ endif
+ 
+-$(BOOT_TARGETS): vmlinux
++$(BOOT_TARGETS1): vmlinux
++	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
++$(BOOT_TARGETS2): vmlinux
++	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
++
++
++bootwrapper_install:
+ 	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ 
+-bootwrapper_install %.dtb:
++%.dtb:
+ 	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ 
+ define archhelp
diff --git a/queue-2.6.34/x86-add-memory-modify-constraints-to-xchg-and-cmpxchg.patch b/queue-2.6.34/x86-add-memory-modify-constraints-to-xchg-and-cmpxchg.patch
new file mode 100644
index 00000000000..d294d4f2c75
--- /dev/null
+++ b/queue-2.6.34/x86-add-memory-modify-constraints-to-xchg-and-cmpxchg.patch
@@ -0,0 +1,227 @@
+From 113fc5a6e8c2288619ff7e8187a6f556b7e0d372 Mon Sep 17 00:00:00 2001
+From: H. Peter Anvin
+Date: Tue, 27 Jul 2010 17:01:49 -0700
+Subject: x86: Add memory modify constraints to xchg() and cmpxchg()
+
+From: H. Peter Anvin
+
+commit 113fc5a6e8c2288619ff7e8187a6f556b7e0d372 upstream.
+
+xchg() and cmpxchg() modify their memory operands, not merely read
+them. For some versions of gcc the "memory" clobber has apparently
+dealt with the situation, but not for all.
+
+Originally-by: Linus Torvalds
+Signed-off-by: H. Peter Anvin
+Cc: Glauber Costa
+Cc: Avi Kivity
+Cc: Peter Palfrader
+Cc: Greg KH
+Cc: Alan Cox
+Cc: Zachary Amsden
+Cc: Marcelo Tosatti
+LKML-Reference: <4C4F7277.8050306@zytor.com>
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/include/asm/cmpxchg_32.h | 68 +++++++++++++++++++-------------------
+ arch/x86/include/asm/cmpxchg_64.h | 40 +++++++++++-----------
+ 2 files changed, 54 insertions(+), 54 deletions(-)
+
+--- a/arch/x86/include/asm/cmpxchg_32.h
++++ b/arch/x86/include/asm/cmpxchg_32.h
+@@ -27,20 +27,20 @@ struct __xchg_dummy {
+ 	switch (size) {						\
+ 	case 1:							\
+ 		asm volatile("xchgb %b0,%1"			\
+-			     : "=q" (__x)			\
+-			     : "m" (*__xg(ptr)), "0" (__x)	\
++			     : "=q" (__x), "+m" (*__xg(ptr))	\
++			     : "0" (__x)			\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 2:							\
+ 		asm volatile("xchgw %w0,%1"			\
+-			     : "=r" (__x)			\
+-			     : "m" (*__xg(ptr)), "0" (__x)	\
++			     : "=r" (__x), "+m" (*__xg(ptr))	\
++			     : "0" (__x)			\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 4:							\
+ 		asm volatile("xchgl %0,%1"			\
+-			     : "=r" (__x)			\
+-			     : "m" (*__xg(ptr)), "0" (__x)	\
++			     : "=r" (__x), "+m" (*__xg(ptr))	\
++			     : "0" (__x)			\
+ 			     : "memory");			\
+ 		break;						\
+ 	default:						\
+@@ -70,14 +70,14 @@ static inline void __set_64bit(unsigned
+ 				 unsigned int low, unsigned int high)
+ {
+ 	asm volatile("\n1:\t"
+-		     "movl (%0), %%eax\n\t"
+-		     "movl 4(%0), %%edx\n\t"
+-		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
++		     "movl (%1), %%eax\n\t"
++		     "movl 4(%1), %%edx\n\t"
++		     LOCK_PREFIX "cmpxchg8b (%1)\n\t"
+ 		     "jnz 1b"
+-		     : /* no outputs */
+-		     : "D"(ptr),
+-		       "b"(low),
+-		       "c"(high)
++		     : "=m" (*ptr)
++		     : "D" (ptr),
++		       "b" (low),
++		       "c" (high)
+ 		     : "ax", "dx", "memory");
+ }
+ 
+@@ -121,21 +121,21 @@ extern void __cmpxchg_wrong_size(void);
+ 	__typeof__(*(ptr)) __new = (new);			\
+ 	switch (size) {						\
+ 	case 1:							\
+-		asm volatile(lock "cmpxchgb %b1,%2"		\
+-			     : "=a"(__ret)			\
+-			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
++		asm volatile(lock "cmpxchgb %b2,%1"		\
++			     : "=a" (__ret), "+m" (*__xg(ptr))	\
++			     : "q" (__new), "0" (__old)		\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 2:							\
+-		asm volatile(lock "cmpxchgw %w1,%2"		\
+-			     : "=a"(__ret)			\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
++		asm volatile(lock "cmpxchgw %w2,%1"		\
++			     : "=a" (__ret), "+m" (*__xg(ptr))	\
++			     : "r" (__new), "0" (__old)		\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 4:							\
+-		asm volatile(lock "cmpxchgl %1,%2"		\
+-			     : "=a"(__ret)			\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
++		asm volatile(lock "cmpxchgl %2,%1"		\
++			     : "=a" (__ret), "+m" (*__xg(ptr))	\
++			     : "r" (__new), "0" (__old)		\
+ 			     : "memory");			\
+ 		break;						\
+ 	default:						\
+@@ -180,12 +180,12 @@ static inline unsigned long long __cmpxc
+ 					     unsigned long long new)
+ {
+ 	unsigned long long prev;
+-	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
+-		     : "=A"(prev)
+-		     : "b"((unsigned long)new),
+-		       "c"((unsigned long)(new >> 32)),
+-		       "m"(*__xg(ptr)),
+-		       "0"(old)
++	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
++		     : "=A" (prev),
++		       "+m" (*__xg(ptr))
++		     : "b" ((unsigned long)new),
++		       "c" ((unsigned long)(new >> 32)),
++		       "0" (old)
+ 		     : "memory");
+ 	return prev;
+ }
+@@ -195,12 +195,12 @@ static inline unsigned long long __cmpxc
+ 					     unsigned long long new)
+ {
+ 	unsigned long long prev;
+-	asm volatile("cmpxchg8b %3"
+-		     : "=A"(prev)
+-		     : "b"((unsigned long)new),
+-		       "c"((unsigned long)(new >> 32)),
+-		       "m"(*__xg(ptr)),
+-		       "0"(old)
++	asm volatile("cmpxchg8b %1"
++		     : "=A" (prev),
++		       "+m" (*__xg(ptr))
++		     : "b" ((unsigned long)new),
++		       "c" ((unsigned long)(new >> 32)),
++		       "0" (old)
+ 		     : "memory");
+ 	return prev;
+ }
+--- a/arch/x86/include/asm/cmpxchg_64.h
++++ b/arch/x86/include/asm/cmpxchg_64.h
+@@ -26,26 +26,26 @@ extern void __cmpxchg_wrong_size(void);
+ 	switch (size) {						\
+ 	case 1:							\
+ 		asm volatile("xchgb %b0,%1"			\
+-			     : "=q" (__x)			\
+-			     : "m" (*__xg(ptr)), "0" (__x)	\
++			     : "=q" (__x), "+m" (*__xg(ptr))	\
++			     : "0" (__x)			\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 2:							\
+ 		asm volatile("xchgw %w0,%1"			\
+-			     : "=r" (__x)			\
+-			     : "m" (*__xg(ptr)), "0" (__x)	\
++			     : "=r" (__x), "+m" (*__xg(ptr))	\
++			     : "0" (__x)			\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 4:							\
+ 		asm volatile("xchgl %k0,%1"			\
+-			     : "=r" (__x)			\
+-			     : "m" (*__xg(ptr)), "0" (__x)	\
++			     : "=r" (__x), "+m" (*__xg(ptr))	\
++			     : "0" (__x)			\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 8:							\
+ 		asm volatile("xchgq %0,%1"			\
+-			     : "=r" (__x)			\
+-			     : "m" (*__xg(ptr)), "0" (__x)	\
++			     : "=r" (__x), "+m" (*__xg(ptr))	\
++			     : "0" (__x)			\
+ 			     : "memory");			\
+ 		break;						\
+ 	default:						\
+@@ -71,27 +71,27 @@ extern void __cmpxchg_wrong_size(void);
+ 	__typeof__(*(ptr)) __new = (new);			\
+ 	switch (size) {						\
+ 	case 1:							\
+-		asm volatile(lock "cmpxchgb %b1,%2"		\
+-			     : "=a"(__ret)			\
+-			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
++		asm volatile(lock "cmpxchgb %b2,%1"		\
++			     : "=a" (__ret), "+m" (*__xg(ptr))	\
++			     : "q" (__new), "0" (__old)		\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 2:							\
+-		asm volatile(lock "cmpxchgw %w1,%2"		\
+-			     : "=a"(__ret)			\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
++		asm volatile(lock "cmpxchgw %w2,%1"		\
++			     : "=a" (__ret), "+m" (*__xg(ptr))	\
++			     : "r" (__new), "0" (__old)		\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 4:							\
+-		asm volatile(lock "cmpxchgl %k1,%2"		\
+-			     : "=a"(__ret)			\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
++		asm volatile(lock "cmpxchgl %k2,%1"		\
++			     : "=a" (__ret), "+m" (*__xg(ptr))	\
++			     : "r" (__new), "0" (__old)		\
+ 			     : "memory");			\
+ 		break;						\
+ 	case 8:							\
+-		asm volatile(lock "cmpxchgq %1,%2"		\
+-			     : "=a"(__ret)			\
+-			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
++		asm volatile(lock "cmpxchgq %2,%1"		\
++			     : "=a" (__ret), "+m" (*__xg(ptr))	\
++			     : "r" (__new), "0" (__old)		\
+ 			     : "memory");			\
+ 		break;						\
+ 	default:						\
diff --git a/queue-2.6.34/x86-kmmio-mmiotrace-fix-double-free-of-kmmio_fault_pages.patch b/queue-2.6.34/x86-kmmio-mmiotrace-fix-double-free-of-kmmio_fault_pages.patch
new file mode 100644
index 00000000000..fc370db0940
--- /dev/null
+++ b/queue-2.6.34/x86-kmmio-mmiotrace-fix-double-free-of-kmmio_fault_pages.patch
@@ -0,0 +1,122 @@
+From 8b8f79b927b6b302bb65fb8c56e7a19be5fbdbef Mon Sep 17 00:00:00 2001
+From: Marcin Slusarz
+Date: Sun, 13 Jun 2010 23:56:54 +0200
+Subject: x86, kmmio/mmiotrace: Fix double free of kmmio_fault_pages
+
+From: Marcin Slusarz
+
+commit 8b8f79b927b6b302bb65fb8c56e7a19be5fbdbef upstream.
+
+After every iounmap mmiotrace has to free kmmio_fault_pages, but
+it can't do it directly, so it defers freeing by RCU.
+
+It usually works, but when mmiotraced code calls ioremap-iounmap
+multiple times without sleeping in between (so RCU won't kick in
+and start freeing), it can be given the same virtual address, so
+at every iounmap mmiotrace will schedule the same pages for
+release. Obviously it will explode on the second free.
+
+Fix it by marking kmmio_fault_pages which are scheduled for
+release and not adding them a second time.
+
+Signed-off-by: Marcin Slusarz
+Tested-by: Marcin Kocielnicki
+Tested-by: Shinpei KATO
+Acked-by: Pekka Paalanen
+Cc: Stuart Bennett
+Cc: Marcin Kocielnicki
+Cc: nouveau@lists.freedesktop.org
+LKML-Reference: <20100613215654.GA3829@joi.lan>
+Signed-off-by: Ingo Molnar
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/mm/kmmio.c | 16 +++++++++++++---
+ arch/x86/mm/testmmiotrace.c | 22 ++++++++++++++++++++++
+ 2 files changed, 35 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -45,6 +45,8 @@ struct kmmio_fault_page {
+ 	 * Protected by kmmio_lock, when linked into kmmio_page_table.
+ 	 */
+ 	int count;
++
++	bool scheduled_for_release;
+ };
+ 
+ struct kmmio_delayed_release {
+@@ -398,8 +400,11 @@ static void release_kmmio_fault_page(uns
+ 	BUG_ON(f->count < 0);
+ 	if (!f->count) {
+ 		disarm_kmmio_fault_page(f);
+-		f->release_next = *release_list;
+-		*release_list = f;
++		if (!f->scheduled_for_release) {
++			f->release_next = *release_list;
++			*release_list = f;
++			f->scheduled_for_release = true;
++		}
+ 	}
+ }
+ 
+@@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(str
+ 			prevp = &f->release_next;
+ 		} else {
+ 			*prevp = f->release_next;
++			f->release_next = NULL;
++			f->scheduled_for_release = false;
+ 		}
+-		f = f->release_next;
++		f = *prevp;
+ 	}
+ 	spin_unlock_irqrestore(&kmmio_lock, flags);
+ 
+@@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio
+ 	kmmio_count--;
+ 	spin_unlock_irqrestore(&kmmio_lock, flags);
+ 
++	if (!release_list)
++		return;
++
+ 	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
+ 	if (!drelease) {
+ 		pr_crit("leaking kmmio_fault_page objects.\n");
+--- a/arch/x86/mm/testmmiotrace.c
++++ b/arch/x86/mm/testmmiotrace.c
+@@ -90,6 +90,27 @@ static void do_test(unsigned long size)
+ 	iounmap(p);
+ }
+ 
++/*
++ * Tests how mmiotrace behaves in the face of multiple ioremap / iounmaps in
++ * a short time. We had a bug in the deferred freeing procedure which tried
++ * to free this region multiple times (ioremap can reuse the same address
++ * for many mappings).
++ */
++static void do_test_bulk_ioremapping(void)
++{
++	void __iomem *p;
++	int i;
++
++	for (i = 0; i < 10; ++i) {
++		p = ioremap_nocache(mmio_address, PAGE_SIZE);
++		if (p)
++			iounmap(p);
++	}
++
++	/* Force freeing. If it crashes, we will know why. */
++	synchronize_rcu();
++}
++
+ static int __init init(void)
+ {
+ 	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+@@ -104,6 +125,7 @@ static int __init init(void)
+ 		   "and writing 16 kB of rubbish in there.\n",
+ 		   size >> 10, mmio_address);
+ 	do_test(size);
++	do_test_bulk_ioremapping();
+ 	pr_info("All done.\n");
+ 	return 0;
+ }
diff --git a/queue-2.6.34/x86-pci-use-host-bridge-_crs-info-on-asrock-alivesata2-glan.patch b/queue-2.6.34/x86-pci-use-host-bridge-_crs-info-on-asrock-alivesata2-glan.patch
new file mode 100644
index 00000000000..0990e4eb90a
--- /dev/null
+++ b/queue-2.6.34/x86-pci-use-host-bridge-_crs-info-on-asrock-alivesata2-glan.patch
@@ -0,0 +1,77 @@
+From 2491762cfb475dbdfa3db11ebea6de49f58b7fac Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas
+Date: Fri, 23 Jul 2010 12:53:27 -0600
+Subject: x86/PCI: use host bridge _CRS info on ASRock ALiveSATA2-GLAN
+
+From: Bjorn Helgaas
+
+commit 2491762cfb475dbdfa3db11ebea6de49f58b7fac upstream.
+
+This DMI quirk turns on "pci=use_crs" for the ALiveSATA2-GLAN because
+amd_bus.c doesn't handle this system correctly.
+
+The system has a single HyperTransport I/O chain, but has two PCI host
+bridges to buses 00 and 80. amd_bus.c learns the MMIO range associated
+with buses 00-ff and that this range is routed to the HT chain hosted at
+node 0, link 0:
+
+    bus: [00, ff] on node 0 link 0
+    bus: 00 index 1 [mem 0x80000000-0xfcffffffff]
+
+This includes the address space for both bus 00 and bus 80, and amd_bus.c
+assumes it's all routed to bus 00.
+
+We find device 80:01.0, which BIOS left in the middle of that space, but
+we don't find a bridge from bus 00 to bus 80, so we conclude that 80:01.0
+is unreachable from bus 00, and we move it from the original, working
+address to something outside the bus 00 aperture, which does not work:
+
+    pci 0000:80:01.0: reg 10: [mem 0xfebfc000-0xfebfffff 64bit]
+    pci 0000:80:01.0: BAR 0: assigned [mem 0xfd00000000-0xfd00003fff 64bit]
+
+The BIOS told us everything we need to know to handle this correctly,
+so we're better off if we just pay attention, which lets us leave the
+80:01.0 device at the original, working address:
+
+    ACPI: PCI Root Bridge [PCI0] (domain 0000 [bus 00-7f])
+    pci_root PNP0A03:00: host bridge window [mem 0x80000000-0xff37ffff]
+    ACPI: PCI Root Bridge [PCI1] (domain 0000 [bus 80-ff])
+    pci_root PNP0A08:00: host bridge window [mem 0xfebfc000-0xfebfffff]
+
+This was a regression between 2.6.33 and 2.6.34. In 2.6.33, amd_bus.c
+was used only when we found multiple HT chains. 3e3da00c01d050, which
+enabled amd_bus.c even on systems with a single HT chain, caused this
+failure.
+
+This quirk was written by Graham. If we ever enable "pci=use_crs" for
+machines from 2006 or earlier, this quirk should be removed.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=16007
+
+Reported-by: Graham Ramsey
+Signed-off-by: Bjorn Helgaas
+Signed-off-by: Jesse Barnes
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/pci/acpi.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -34,6 +34,15 @@ static const struct dmi_system_id pci_us
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
+ 		},
+ 	},
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
++	/* 2006 AMD HT/VIA system with two host bridges */
++	{
++		.callback = set_use_crs,
++		.ident = "ASRock ALiveSATA2-GLAN",
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
++		},
++	},
+ 	{}
+ };
+
diff --git a/queue-2.6.34/x86-vmware-preset-lpj-values-when-on-vmware.patch b/queue-2.6.34/x86-vmware-preset-lpj-values-when-on-vmware.patch
new file mode 100644
index 00000000000..b756f229772
--- /dev/null
+++ b/queue-2.6.34/x86-vmware-preset-lpj-values-when-on-vmware.patch
@@ -0,0 +1,51 @@
+From 9f242dc10e0c3c1eb32d8c83c18650a35fd7f80d Mon Sep 17 00:00:00 2001
+From: Alok Kataria
+Date: Mon, 2 Aug 2010 16:10:37 -0700
+Subject: x86, vmware: Preset lpj values when on VMware.
+
+From: Alok Kataria
+
+commit 9f242dc10e0c3c1eb32d8c83c18650a35fd7f80d upstream.
+
+When running on VMware's platform, we have seen situations where
+the APs try to calibrate their lpj values and fail to get good
+calibration runs because of timing issues. As a result, delays don't
+work correctly on all cpus.
+
+The solution is to set the preset_lpj value based on the current tsc
+frequency value. This is similar to what KVM does as well.
+
+Signed-off-by: Alok N Kataria
+LKML-Reference: <1280790637.14933.29.camel@ank32.eng.vmware.com>
+Signed-off-by: H. Peter Anvin
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kernel/cpu/vmware.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/vmware.c
++++ b/arch/x86/kernel/cpu/vmware.c
+@@ -51,7 +51,7 @@ static inline int __vmware_platform(void
+ 
+ static unsigned long vmware_get_tsc_khz(void)
+ {
+-	uint64_t tsc_hz;
++	uint64_t tsc_hz, lpj;
+ 	uint32_t eax, ebx, ecx, edx;
+ 
+ 	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+@@ -62,6 +62,13 @@ static unsigned long vmware_get_tsc_khz(
+ 	printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
+ 	       (unsigned long) tsc_hz / 1000,
+ 	       (unsigned long) tsc_hz % 1000);
++
++	if (!preset_lpj) {
++		lpj = ((u64)tsc_hz * 1000);
++		do_div(lpj, HZ);
++		preset_lpj = lpj;
++	}
++
+ 	return tsc_hz;
+ }
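
Background note on the ata_piix patch above: SIDPR is an indexed register window, so software first writes an index to select a port/register and then touches a shared data register, and two ports doing select-then-access concurrently can interleave between the two steps. A minimal user-space sketch of that pattern and of the lock that closes the race, assuming POSIX threads; all names here are illustrative, not kernel APIs:

#include <pthread.h>
#include <stdio.h>

/*
 * Toy model of an indexed register window: step 1 selects a register,
 * step 2 reads the shared data register. Without the lock, a concurrent
 * caller could rewrite window_index between the two steps, which is the
 * kind of race the dedicated sidpr_lock above closes.
 */
static unsigned int window_index;
static unsigned int window_data[4] = { 0x10, 0x20, 0x30, 0x40 };
static pthread_mutex_t window_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int window_read(unsigned int reg)
{
	unsigned int val;

	pthread_mutex_lock(&window_lock);	/* plays the role of sidpr_lock */
	window_index = reg;			/* step 1: select */
	val = window_data[window_index];	/* step 2: access */
	pthread_mutex_unlock(&window_lock);
	return val;
}

int main(void)
{
	printf("reg 2 reads 0x%x\n", window_read(2));
	return 0;
}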
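The constraint change in the xchg()/cmpxchg() patch can be reproduced in user space. The point is that these instructions write their memory operand, so it must be declared as a read-write operand ("+m") rather than a plain input ("m"); relying on the "memory" clobber alone happened to work with some gcc versions but is not guaranteed. A hedged sketch of the fixed form, assuming x86-64 and a GNU-compatible compiler; my_xchg/my_cmpxchg are illustrative names, not the kernel's:

#include <stdio.h>

/* xchg with the fixed "+m" constraint: the compiler knows *ptr is modified. */
static unsigned long my_xchg(unsigned long *ptr, unsigned long val)
{
	asm volatile("xchgq %0,%1"
		     : "=r" (val), "+m" (*ptr)
		     : "0" (val)
		     : "memory");
	return val;		/* previous value of *ptr */
}

/* cmpxchg with the fixed constraints, mirroring the 8-byte case above. */
static unsigned long my_cmpxchg(unsigned long *ptr, unsigned long old,
				unsigned long new)
{
	unsigned long ret;

	asm volatile("lock cmpxchgq %2,%1"
		     : "=a" (ret), "+m" (*ptr)
		     : "r" (new), "0" (old)
		     : "memory");
	return ret;		/* value found in *ptr; == old on success */
}

int main(void)
{
	unsigned long v = 1;

	printf("xchg returned %lu, v = %lu\n", my_xchg(&v, 2), v);
	printf("cmpxchg returned %lu, v = %lu\n", my_cmpxchg(&v, 2, 3), v);
	return 0;
}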
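Finally, the arithmetic behind the preset_lpj change: despite its name, tsc_hz in vmware_get_tsc_khz() holds kHz (note the printk divides by 1000 to print MHz), so multiplying by 1000 gives TSC cycles per second, and dividing by HZ (timer ticks per second) gives loops per jiffy. A standalone sketch of the same computation; HZ and the frequency below are made-up example values:

#include <stdio.h>
#include <stdint.h>

#define HZ 250				/* illustrative kernel tick rate */

int main(void)
{
	uint64_t tsc_khz = 2400000;		/* a 2.4 GHz TSC, in kHz */
	uint64_t lpj = tsc_khz * 1000 / HZ;	/* equivalent of the do_div() above */

	printf("preset_lpj = %llu\n", (unsigned long long)lpj);
	return 0;
}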