git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
2.6.32 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Tue, 10 Aug 2010 22:50:25 +0000 (15:50 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Tue, 10 Aug 2010 22:50:25 +0000 (15:50 -0700)
queue-2.6.32/ata_piix-fix-locking-around-sidpr-access.patch [new file with mode: 0644]
queue-2.6.32/nvram-fix-write-beyond-end-condition-prove-to-gcc-copy-is-safe.patch [new file with mode: 0644]
queue-2.6.32/powerpc-fix-build-with-make-3.82.patch [new file with mode: 0644]
queue-2.6.32/x86-add-memory-modify-constraints-to-xchg-and-cmpxchg.patch [new file with mode: 0644]

diff --git a/queue-2.6.32/ata_piix-fix-locking-around-sidpr-access.patch b/queue-2.6.32/ata_piix-fix-locking-around-sidpr-access.patch
new file mode 100644 (file)
index 0000000..8fd9217
--- /dev/null
@@ -0,0 +1,77 @@
+From 213373cf974fe69e78ec894b07f45ae2f5a3a078 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 20 Jul 2010 16:20:01 +0200
+Subject: ata_piix: fix locking around SIDPR access
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 213373cf974fe69e78ec894b07f45ae2f5a3a078 upstream.
+
+SIDPR window registers are shared across ports and as each access is
+done in two steps, accesses to different ports under EH may race.
+This primarily is caused by incorrect host locking in EH context and
+should be fixed by defining locking requirements for each EH operation
+which can be used during EH and enforcing them but for now work around
+the problem by adding a dedicated SIDPR lock and grabbing it for each
+SIDPR access.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Mark Knecht <markknecht@gmail.com>
+Reported-by: Paul Check <paul@thechecks.ca>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/ata_piix.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -157,6 +157,7 @@ struct piix_map_db {
+ struct piix_host_priv {
+       const int *map;
+       u32 saved_iocfg;
++      spinlock_t sidpr_lock;  /* FIXME: remove once locking in EH is fixed */
+       void __iomem *sidpr;
+ };
+@@ -948,12 +949,15 @@ static int piix_sidpr_scr_read(struct at
+                              unsigned int reg, u32 *val)
+ {
+       struct piix_host_priv *hpriv = link->ap->host->private_data;
++      unsigned long flags;
+       if (reg >= ARRAY_SIZE(piix_sidx_map))
+               return -EINVAL;
++      spin_lock_irqsave(&hpriv->sidpr_lock, flags);
+       piix_sidpr_sel(link, reg);
+       *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
++      spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
+       return 0;
+ }
+@@ -961,12 +965,15 @@ static int piix_sidpr_scr_write(struct a
+                               unsigned int reg, u32 val)
+ {
+       struct piix_host_priv *hpriv = link->ap->host->private_data;
++      unsigned long flags;
+       if (reg >= ARRAY_SIZE(piix_sidx_map))
+               return -EINVAL;
++      spin_lock_irqsave(&hpriv->sidpr_lock, flags);
+       piix_sidpr_sel(link, reg);
+       iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
++      spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
+       return 0;
+ }
+@@ -1555,6 +1562,7 @@ static int __devinit piix_init_one(struc
+       hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+       if (!hpriv)
+               return -ENOMEM;
++      spin_lock_init(&hpriv->sidpr_lock);
+       /* Save IOCFG, this will be used for cable detection, quirk
+        * detection and restoration on detach.  This is necessary
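
The SIDPR window serialized by the patch above is a single index/data register pair shared by all ports: every access takes two steps (write the index, then read or write the data register), so two ports going through those steps concurrently can clobber each other's index. Below is a minimal user-space sketch of that access pattern, with a pthread mutex standing in for the driver's new sidpr_lock and purely illustrative names, sizes, and layout.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* One shared index/data window in front of per-port register banks
 * (illustrative layout, not the real PIIX register map). */
static uint32_t window_idx;
static uint32_t regs[2][4];
static pthread_mutex_t sidpr_lock = PTHREAD_MUTEX_INITIALIZER;

static uint32_t sidpr_read(unsigned int port, unsigned int reg)
{
        uint32_t val;

        pthread_mutex_lock(&sidpr_lock);        /* serialize both steps */
        window_idx = port * 4 + reg;            /* step 1: select register */
        val = regs[window_idx / 4][window_idx % 4]; /* step 2: read data */
        pthread_mutex_unlock(&sidpr_lock);
        return val;
}

int main(void)
{
        regs[1][2] = 0x113;
        printf("port 1 reg 2 = 0x%x\n", sidpr_read(1, 2));
        return 0;
}

Build with -pthread. Without the lock, two threads racing through sidpr_read() can interleave steps 1 and 2 and return the other port's register, which is the failure mode the dedicated sidpr_lock works around until EH locking is reworked.
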
diff --git a/queue-2.6.32/nvram-fix-write-beyond-end-condition-prove-to-gcc-copy-is-safe.patch b/queue-2.6.32/nvram-fix-write-beyond-end-condition-prove-to-gcc-copy-is-safe.patch
new file mode 100644 (file)
index 0000000..ab54035
--- /dev/null
@@ -0,0 +1,64 @@
+From a01c7800420d2c294ca403988488a635d4087a6d Mon Sep 17 00:00:00 2001
+From: H. Peter Anvin <hpa@zytor.com>
+Date: Fri, 11 Dec 2009 15:48:23 -0800
+Subject: nvram: Fix write beyond end condition; prove to gcc copy is safe
+
+From: H. Peter Anvin <hpa@zytor.com>
+
+commit a01c7800420d2c294ca403988488a635d4087a6d upstream.
+
+In nvram_write, first of all, correctly handle the case where the file
+pointer is already beyond the end; we should return EOF in that case.
+
+Second, make the logic a bit more explicit so that gcc can statically
+prove that the copy_from_user() is safe.  Once the condition of the
+beyond-end filepointer is eliminated, the copy is safe but gcc can't
+prove it, causing build failures for i386 allyesconfig.
+
+Third, eliminate the entirely superfluous variable "len", and just use
+the passed-in variable "count" instead.
+
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Cc: Arjan van de Ven <arjan@infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Wim Van Sebroeck <wim@iguana.be>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+LKML-Reference: <tip-*@git.kernel.org>
+Cc: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/nvram.c |   14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/char/nvram.c
++++ b/drivers/char/nvram.c
+@@ -265,10 +265,16 @@ static ssize_t nvram_write(struct file *
+       unsigned char contents[NVRAM_BYTES];
+       unsigned i = *ppos;
+       unsigned char *tmp;
+-      int len;
+-      len = (NVRAM_BYTES - i) < count ? (NVRAM_BYTES - i) : count;
+-      if (copy_from_user(contents, buf, len))
++      if (i >= NVRAM_BYTES)
++              return 0;       /* Past EOF */
++
++      if (count > NVRAM_BYTES - i)
++              count = NVRAM_BYTES - i;
++      if (count > NVRAM_BYTES)
++              return -EFAULT; /* Can't happen, but prove it to gcc */
++
++      if (copy_from_user(contents, buf, count))
+               return -EFAULT;
+       spin_lock_irq(&rtc_lock);
+@@ -276,7 +282,7 @@ static ssize_t nvram_write(struct file *
+       if (!__nvram_check_checksum())
+               goto checksum_err;
+-      for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
++      for (tmp = contents; count--; ++i, ++tmp)
+               __nvram_write_byte(*tmp, i);
+       __nvram_set_checksum();
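
The core of the patch above is a bounds pattern the compiler can follow: return 0 (EOF) when the offset is already past the end, clamp count to the space remaining, and only then copy into the fixed-size buffer. Below is a user-space analogue of that logic, with memcpy() standing in for copy_from_user() and the buffer size only illustrative (the driver derives NVRAM_BYTES from the CMOS layout).

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define NVRAM_BYTES 114                 /* stand-in size for the sketch */

static unsigned char nvram[NVRAM_BYTES];

static ssize_t nvram_store(const unsigned char *buf, size_t count, size_t pos)
{
        unsigned char contents[NVRAM_BYTES];

        if (pos >= NVRAM_BYTES)
                return 0;                       /* past EOF: nothing written */

        if (count > NVRAM_BYTES - pos)
                count = NVRAM_BYTES - pos;      /* clamp to remaining space */

        memcpy(contents, buf, count);           /* stands in for copy_from_user() */
        memcpy(nvram + pos, contents, count);
        return (ssize_t)count;
}

int main(void)
{
        unsigned char data[8] = "example";

        /* Only 4 bytes fit between offset 110 and the end. */
        printf("wrote %ld bytes\n", (long)nvram_store(data, sizeof(data), 110));
        return 0;
}

The seemingly redundant "count > NVRAM_BYTES" test in the patch itself changes nothing at runtime; it exists so gcc can prove the copy into contents[] is bounded, which is what fixed the i386 allyesconfig build failure.
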
diff --git a/queue-2.6.32/powerpc-fix-build-with-make-3.82.patch b/queue-2.6.32/powerpc-fix-build-with-make-3.82.patch
new file mode 100644 (file)
index 0000000..a15263a
--- /dev/null
@@ -0,0 +1,62 @@
+From e32e78c5ee8aadef020fbaecbe6fb741ed9029fd Mon Sep 17 00:00:00 2001
+From: Sam Ravnborg <sam@ravnborg.org>
+Date: Mon, 2 Aug 2010 20:47:48 +0000
+Subject: powerpc: fix build with make 3.82
+
+From: Sam Ravnborg <sam@ravnborg.org>
+
+commit e32e78c5ee8aadef020fbaecbe6fb741ed9029fd upstream.
+
+Thomas Backlund reported that the powerpc build broke with make 3.82.
+It failed with the following message:
+
+    arch/powerpc/Makefile:183: *** mixed implicit and normal rules.  Stop.
+
+The fix is to avoid mixing non-wildcard and wildcard targets.
+
+Reported-by: Thomas Backlund <tmb@mandriva.org>
+Tested-by: Thomas Backlund <tmb@mandriva.org>
+Cc: Michal Marek <mmarek@suse.cz>
+Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/Makefile |   16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -158,9 +158,11 @@ drivers-$(CONFIG_OPROFILE)        += arch/power
+ # Default to zImage, override when needed
+ all: zImage
+-BOOT_TARGETS = zImage zImage.initrd uImage zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
++# With make 3.82 we cannot mix normal and wildcard targets
++BOOT_TARGETS1 := zImage zImage.initrd uImaged
++BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
+-PHONY += $(BOOT_TARGETS)
++PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
+ boot := arch/$(ARCH)/boot
+@@ -175,10 +177,16 @@ relocs_check: arch/powerpc/relocs_check.
+ zImage: relocs_check
+ endif
+-$(BOOT_TARGETS): vmlinux
++$(BOOT_TARGETS1): vmlinux
++      $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
++$(BOOT_TARGETS2): vmlinux
++      $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
++
++
++bootwrapper_install:
+       $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+-bootwrapper_install %.dtb:
++%.dtb:
+       $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ define archhelp
diff --git a/queue-2.6.32/x86-add-memory-modify-constraints-to-xchg-and-cmpxchg.patch b/queue-2.6.32/x86-add-memory-modify-constraints-to-xchg-and-cmpxchg.patch
new file mode 100644 (file)
index 0000000..cf5afa9
--- /dev/null
@@ -0,0 +1,352 @@
+From 113fc5a6e8c2288619ff7e8187a6f556b7e0d372 Mon Sep 17 00:00:00 2001
+From: H. Peter Anvin <hpa@zytor.com>
+Date: Tue, 27 Jul 2010 17:01:49 -0700
+Subject: x86: Add memory modify constraints to xchg() and cmpxchg()
+
+From: H. Peter Anvin <hpa@zytor.com>
+
+commit 113fc5a6e8c2288619ff7e8187a6f556b7e0d372 upstream.
+
+xchg() and cmpxchg() modify their memory operands, not merely read
+them.  For some versions of gcc the "memory" clobber has apparently
+dealt with the situation, but not for all.
+
+Originally-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Cc: Glauber Costa <glommer@redhat.com>
+Cc: Avi Kivity <avi@redhat.com>
+Cc: Peter Palfrader <peter@palfrader.org>
+Cc: Greg KH <gregkh@suse.de>
+Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Cc: Zachary Amsden <zamsden@redhat.com>
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+LKML-Reference: <4C4F7277.8050306@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/cmpxchg_32.h |   86 ++++++++++++++++++-------------------
+ arch/x86/include/asm/cmpxchg_64.h |   88 ++++++++++++++++++++------------------
+ 2 files changed, 89 insertions(+), 85 deletions(-)
+
+--- a/arch/x86/include/asm/cmpxchg_32.h
++++ b/arch/x86/include/asm/cmpxchg_32.h
+@@ -34,12 +34,12 @@ static inline void __set_64bit(unsigned
+                              unsigned int low, unsigned int high)
+ {
+       asm volatile("\n1:\t"
+-                   "movl (%0), %%eax\n\t"
+-                   "movl 4(%0), %%edx\n\t"
+-                   LOCK_PREFIX "cmpxchg8b (%0)\n\t"
++                   "movl (%1), %%eax\n\t"
++                   "movl 4(%1), %%edx\n\t"
++                   LOCK_PREFIX "cmpxchg8b %0\n\t"
+                    "jnz 1b"
+-                   : /* no outputs */
+-                   : "D"(ptr),
++                   : "=m"(*ptr)
++                   : "D" (ptr),
+                      "b"(low),
+                      "c"(high)
+                    : "ax", "dx", "memory");
+@@ -82,20 +82,20 @@ static inline unsigned long __xchg(unsig
+       switch (size) {
+       case 1:
+               asm volatile("xchgb %b0,%1"
+-                           : "=q" (x)
+-                           : "m" (*__xg(ptr)), "0" (x)
++                           : "=q" (x), "+m" (*__xg(ptr))
++                           : "0" (x)
+                            : "memory");
+               break;
+       case 2:
+               asm volatile("xchgw %w0,%1"
+-                           : "=r" (x)
+-                           : "m" (*__xg(ptr)), "0" (x)
++                           : "=r" (x), "+m" (*__xg(ptr))
++                           : "0" (x)
+                            : "memory");
+               break;
+       case 4:
+               asm volatile("xchgl %0,%1"
+-                           : "=r" (x)
+-                           : "m" (*__xg(ptr)), "0" (x)
++                           : "=r" (x), "+m" (*__xg(ptr))
++                           : "0" (x)
+                            : "memory");
+               break;
+       }
+@@ -139,21 +139,21 @@ static inline unsigned long __cmpxchg(vo
+       unsigned long prev;
+       switch (size) {
+       case 1:
+-              asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+-                           : "=a"(prev)
+-                           : "q"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile(LOCK_PREFIX "cmpxchgb %b2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "q"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 2:
+-              asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile(LOCK_PREFIX "cmpxchgw %w2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 4:
+-              asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile(LOCK_PREFIX "cmpxchgl %2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       }
+@@ -172,21 +172,21 @@ static inline unsigned long __sync_cmpxc
+       unsigned long prev;
+       switch (size) {
+       case 1:
+-              asm volatile("lock; cmpxchgb %b1,%2"
+-                           : "=a"(prev)
+-                           : "q"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("lock; cmpxchgb %b2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "q"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 2:
+-              asm volatile("lock; cmpxchgw %w1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("lock; cmpxchgw %w2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 4:
+-              asm volatile("lock; cmpxchgl %1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("lock; cmpxchgl %2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       }
+@@ -200,21 +200,21 @@ static inline unsigned long __cmpxchg_lo
+       unsigned long prev;
+       switch (size) {
+       case 1:
+-              asm volatile("cmpxchgb %b1,%2"
+-                           : "=a"(prev)
+-                           : "q"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("cmpxchgb %b2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "q"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 2:
+-              asm volatile("cmpxchgw %w1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("cmpxchgw %w2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 4:
+-              asm volatile("cmpxchgl %1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("cmpxchgl %2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       }
+@@ -226,11 +226,10 @@ static inline unsigned long long __cmpxc
+                                            unsigned long long new)
+ {
+       unsigned long long prev;
+-      asm volatile(LOCK_PREFIX "cmpxchg8b %3"
+-                   : "=A"(prev)
++      asm volatile(LOCK_PREFIX "cmpxchg8b %1"
++                   : "=A"(prev), "+m" (*__xg(ptr))
+                    : "b"((unsigned long)new),
+                      "c"((unsigned long)(new >> 32)),
+-                     "m"(*__xg(ptr)),
+                      "0"(old)
+                    : "memory");
+       return prev;
+@@ -241,11 +240,10 @@ static inline unsigned long long __cmpxc
+                                                  unsigned long long new)
+ {
+       unsigned long long prev;
+-      asm volatile("cmpxchg8b %3"
+-                   : "=A"(prev)
++      asm volatile("cmpxchg8b %1"
++                   : "=A"(prev), "+m"(*__xg(ptr))
+                    : "b"((unsigned long)new),
+                      "c"((unsigned long)(new >> 32)),
+-                     "m"(*__xg(ptr)),
+                      "0"(old)
+                    : "memory");
+       return prev;
+--- a/arch/x86/include/asm/cmpxchg_64.h
++++ b/arch/x86/include/asm/cmpxchg_64.h
+@@ -26,26 +26,26 @@ static inline unsigned long __xchg(unsig
+       switch (size) {
+       case 1:
+               asm volatile("xchgb %b0,%1"
+-                           : "=q" (x)
+-                           : "m" (*__xg(ptr)), "0" (x)
++                           : "=q" (x), "+m" (*__xg(ptr))
++                           : "0" (x)
+                            : "memory");
+               break;
+       case 2:
+               asm volatile("xchgw %w0,%1"
+-                           : "=r" (x)
+-                           : "m" (*__xg(ptr)), "0" (x)
++                           : "=r" (x), "+m" (*__xg(ptr))
++                           : "0" (x)
+                            : "memory");
+               break;
+       case 4:
+               asm volatile("xchgl %k0,%1"
+-                           : "=r" (x)
+-                           : "m" (*__xg(ptr)), "0" (x)
++                           : "=r" (x), "+m" (*__xg(ptr))
++                           : "0" (x)
+                            : "memory");
+               break;
+       case 8:
+               asm volatile("xchgq %0,%1"
+-                           : "=r" (x)
+-                           : "m" (*__xg(ptr)), "0" (x)
++                           : "=r" (x), "+m" (*__xg(ptr))
++                           : "0" (x)
+                            : "memory");
+               break;
+       }
+@@ -66,27 +66,27 @@ static inline unsigned long __cmpxchg(vo
+       unsigned long prev;
+       switch (size) {
+       case 1:
+-              asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+-                           : "=a"(prev)
+-                           : "q"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile(LOCK_PREFIX "cmpxchgb %b2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "q"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 2:
+-              asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile(LOCK_PREFIX "cmpxchgw %w2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 4:
+-              asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile(LOCK_PREFIX "cmpxchgl %k2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 8:
+-              asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile(LOCK_PREFIX "cmpxchgq %2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       }
+@@ -105,21 +105,27 @@ static inline unsigned long __sync_cmpxc
+       unsigned long prev;
+       switch (size) {
+       case 1:
+-              asm volatile("lock; cmpxchgb %b1,%2"
+-                           : "=a"(prev)
+-                           : "q"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("lock; cmpxchgb %b2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "q"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 2:
+-              asm volatile("lock; cmpxchgw %w1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("lock; cmpxchgw %w2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 4:
+-              asm volatile("lock; cmpxchgl %1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("lock; cmpxchgl %k2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
++                           : "memory");
++              return prev;
++      case 8:
++              asm volatile("lock; cmpxchgq %2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       }
+@@ -133,27 +139,27 @@ static inline unsigned long __cmpxchg_lo
+       unsigned long prev;
+       switch (size) {
+       case 1:
+-              asm volatile("cmpxchgb %b1,%2"
+-                           : "=a"(prev)
+-                           : "q"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("cmpxchgb %b2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "q"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 2:
+-              asm volatile("cmpxchgw %w1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("cmpxchgw %w2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 4:
+-              asm volatile("cmpxchgl %k1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("cmpxchgl %k2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       case 8:
+-              asm volatile("cmpxchgq %1,%2"
+-                           : "=a"(prev)
+-                           : "r"(new), "m"(*__xg(ptr)), "0"(old)
++              asm volatile("cmpxchgq %2,%1"
++                           : "=a"(prev), "+m"(*__xg(ptr))
++                           : "r"(new), "0"(old)
+                            : "memory");
+               return prev;
+       }
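
The change above comes down to one constraint: the memory operand of xchg/cmpxchg is read-write, so it belongs in the output list as "+m" rather than in the input list as "m" with only a blanket "memory" clobber to warn the compiler. Below is a minimal sketch for x86-64 with GCC/Clang inline asm; my_xchg() is an illustrative wrapper, not the kernel's __xchg().

#include <stdio.h>

static inline unsigned long my_xchg(unsigned long *ptr, unsigned long val)
{
        /* xchg both reads and writes *ptr, hence the "+m" constraint; the
         * register operand is likewise read-write.  With a memory operand,
         * x86 xchg is implicitly locked, so no LOCK prefix is needed. */
        asm volatile("xchgq %0,%1"
                     : "+r" (val), "+m" (*ptr)
                     :
                     : "memory");
        return val;                     /* previous value of *ptr */
}

int main(void)
{
        unsigned long v = 1;
        unsigned long old = my_xchg(&v, 2);

        printf("old=%lu new=%lu\n", old, v);    /* old=1 new=2 */
        return 0;
}

With the old "m"(*__xg(ptr)) input form, only the blanket "memory" clobber told gcc that the location is modified; per the commit message, that was sufficient for some gcc versions but not all, which is the miscompile class this backport closes.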