]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
another .24 patch
authorGreg Kroah-Hartman <gregkh@suse.de>
Thu, 7 Feb 2008 17:43:05 +0000 (09:43 -0800)
committerGreg Kroah-Hartman <gregkh@suse.de>
Thu, 7 Feb 2008 17:43:05 +0000 (09:43 -0800)
queue-2.6.24/series
queue-2.6.24/x86-replace-lock_prefix-in-futex.h.patch [new file with mode: 0644]

index e7a463b3f784baff67845e9c68471b8f784a26ae..129237c4186e0c34c39ac964ae412fd8a0127984 100644 (file)
@@ -40,3 +40,4 @@ b43-reject-new-firmware-early.patch
 sched-let-nice-tasks-have-smaller-impact.patch
 sched-fix-high-wake-up-latencies-with-fair_user_sched.patch
 fix-writev-regression-pan-hanging-unkillable-and-un-straceable.patch
+x86-replace-lock_prefix-in-futex.h.patch
diff --git a/queue-2.6.24/x86-replace-lock_prefix-in-futex.h.patch b/queue-2.6.24/x86-replace-lock_prefix-in-futex.h.patch
new file mode 100644 (file)
index 0000000..2a0e166
--- /dev/null
@@ -0,0 +1,86 @@
+Subject: x86: replace LOCK_PREFIX in futex.h
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 07 Feb 2008 13:06:02 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+The exception fixup for the futex macros __futex_atomic_op1/2 and
+futex_atomic_cmpxchg_inatomic() is missing an entry when the lock
+prefix is replaced by a NOP via SMP alternatives.
+
+Chuck Ebert tracked this down from the information provided in:
+https://bugzilla.redhat.com/show_bug.cgi?id=429412
+
+A possible solution would be to add another fixup after the
+LOCK_PREFIX, so both the LOCK and NOP case have their own entry in the
+exception table, but it's not really worth the trouble.
+
+Simply replace LOCK_PREFIX with lock and keep those untouched by SMP
+alternatives.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/asm-x86/futex_32.h |    6 +++---
+ include/asm-x86/futex_64.h |    6 +++---
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/include/asm-x86/futex_32.h
++++ b/include/asm-x86/futex_32.h
+@@ -28,7 +28,7 @@
+ "1:   movl    %2, %0\n\
+       movl    %0, %3\n"                                       \
+       insn "\n"                                               \
+-"2:   " LOCK_PREFIX "cmpxchgl %3, %2\n\
++"2:   lock cmpxchgl %3, %2\n\
+       jnz     1b\n\
+ 3:    .section .fixup,\"ax\"\n\
+ 4:    mov     %5, %1\n\
+@@ -68,7 +68,7 @@ futex_atomic_op_inuser (int encoded_op, 
+ #endif
+               switch (op) {
+               case FUTEX_OP_ADD:
+-                      __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
++                      __futex_atomic_op1("lock xaddl %0, %2", ret,
+                                          oldval, uaddr, oparg);
+                       break;
+               case FUTEX_OP_OR:
+@@ -111,7 +111,7 @@ futex_atomic_cmpxchg_inatomic(int __user
+               return -EFAULT;
+       __asm__ __volatile__(
+-              "1:     " LOCK_PREFIX "cmpxchgl %3, %1          \n"
++              "1:     lock cmpxchgl %3, %1                    \n"
+               "2:     .section .fixup, \"ax\"                 \n"
+               "3:     mov     %2, %0                          \n"
+--- a/include/asm-x86/futex_64.h
++++ b/include/asm-x86/futex_64.h
+@@ -27,7 +27,7 @@
+ "1:   movl    %2, %0\n\
+       movl    %0, %3\n"                                       \
+       insn "\n"                                               \
+-"2:   " LOCK_PREFIX "cmpxchgl %3, %2\n\
+"2:   lock cmpxchgl %3, %2\n\
+       jnz     1b\n\
+ 3:    .section .fixup,\"ax\"\n\
+ 4:    mov     %5, %1\n\
+@@ -62,7 +62,7 @@ futex_atomic_op_inuser (int encoded_op, 
+               __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ADD:
+-              __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++              __futex_atomic_op1("lock xaddl %0, %2", ret, oldval,
+                                  uaddr, oparg);
+               break;
+       case FUTEX_OP_OR:
+@@ -101,7 +101,7 @@ futex_atomic_cmpxchg_inatomic(int __user
+               return -EFAULT;
+       __asm__ __volatile__(
+-              "1:     " LOCK_PREFIX "cmpxchgl %3, %1          \n"
++              "1:     lock cmpxchgl %3, %1                    \n"
+               "2:     .section .fixup, \"ax\"                 \n"
+               "3:     mov     %2, %0                          \n"