3.18-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 13 Jan 2015 22:56:07 +0000 (14:56 -0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 13 Jan 2015 22:56:07 +0000 (14:56 -0800)
added patches:
x86-vdso-use-asm-volatile-in-__getcpu.patch

queue-3.18/series
queue-3.18/x86-vdso-use-asm-volatile-in-__getcpu.patch [new file with mode: 0644]

diff --git a/queue-3.18/series b/queue-3.18/series
index 646859f19c36ee68f9a4f8464940c89ccab87e6a..d6da9c6265aa59c675267ff3dd23c3204a33e282 100644
--- a/queue-3.18/series
+++ b/queue-3.18/series
@@ -67,3 +67,4 @@ kvm-s390-flush-cpu-on-load-control.patch
 kvm-s390-fix-ipte-locking.patch
 kvm-x86-drop-severity-of-generation-wraparound-message.patch
 x86_64-vdso-fix-the-vdso-address-randomization-algorithm.patch
+x86-vdso-use-asm-volatile-in-__getcpu.patch
diff --git a/queue-3.18/x86-vdso-use-asm-volatile-in-__getcpu.patch b/queue-3.18/x86-vdso-use-asm-volatile-in-__getcpu.patch
new file mode 100644
index 0000000..77e34c9
--- /dev/null
+++ b/queue-3.18/x86-vdso-use-asm-volatile-in-__getcpu.patch
@@ -0,0 +1,62 @@
+From 1ddf0b1b11aa8a90cef6706e935fc31c75c406ba Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@amacapital.net>
+Date: Sun, 21 Dec 2014 08:57:46 -0800
+Subject: x86, vdso: Use asm volatile in __getcpu
+
+From: Andy Lutomirski <luto@amacapital.net>
+
+commit 1ddf0b1b11aa8a90cef6706e935fc31c75c406ba upstream.
+
+In Linux 3.18 and below, GCC hoists the lsl instructions in the
+pvclock code all the way to the beginning of __vdso_clock_gettime,
+slowing the non-paravirt case significantly.  For unknown reasons,
+presumably related to the removal of a branch, the performance issue
+is gone as of
+
+e76b027e6408 x86,vdso: Use LSL unconditionally for vgetcpu
+
+but I don't trust GCC enough to expect the problem to stay fixed.
+
+There should be no correctness issue, because the __getcpu calls in
+__vdso_clock_gettime were never necessary in the first place.
+
+Note to stable maintainers: In 3.18 and below, depending on
+configuration, gcc 4.9.2 generates code like this:
+
+     9c3:       44 0f 03 e8             lsl    %ax,%r13d
+     9c7:       45 89 eb                mov    %r13d,%r11d
+     9ca:       0f 03 d8                lsl    %ax,%ebx
+
+This patch won't apply as is to any released kernel, but I'll send a
+trivial backported version if needed.
+
+[
+ Backported by Andy Lutomirski.  Should apply to all affected
+ versions.  This fixes a functionality bug as well as a performance
+ bug: buggy kernels can infinite loop in __vdso_clock_gettime on
+ affected compilers.  See, for example:
+
+ https://bugzilla.redhat.com/show_bug.cgi?id=1178975
+]
+
+Fixes: 51c19b4f5927 ("x86: vdso: pvclock gettime support")
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Andy Lutomirski <luto@amacapital.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/vsyscall.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void
+               native_read_tscp(&p);
+       } else {
+               /* Load per CPU data from GDT */
+-              asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
++              asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+       }
+       return p;
+}
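
Why the one-word change is enough: a GCC extended asm that has output operands but no "volatile" qualifier is modelled as a pure function of its inputs, so the optimizer is free to common-subexpression-eliminate it or hoist it ahead of the branch that was meant to skip it; "asm volatile" pins the instruction to the point where it is written. The following is a minimal userspace sketch of the contrast, not the kernel header; the 0x7b selector is assumed here to be the conventional x86-64 per-CPU GDT selector ((15 << 3) | 3) and is used purely for illustration.

/* sketch.c: plain asm vs. asm volatile for an lsl-based CPU lookup.
 * Illustrative stand-in for __getcpu(), not the actual kernel code. */
#include <stdio.h>

/* Assumption for illustration: 0x7b is the x86-64 per-CPU GDT selector.
 * Any selector value would do for the codegen point being made. */
#define PER_CPU_SEG 0x7b

static unsigned int getcpu_plain(void)
{
	unsigned int p;
	/* No volatile, no declared side effects: GCC may treat this asm as
	 * a pure function of its operands and hoist it out of the branch
	 * that was supposed to guard it -- the bug this patch works around. */
	asm("lsl %1,%0" : "=r" (p) : "r" (PER_CPU_SEG));
	return p;
}

static unsigned int getcpu_volatile(void)
{
	unsigned int p;
	/* volatile pins the instruction where it appears, so a fast path
	 * that takes the other branch never executes the lsl at all. */
	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (PER_CPU_SEG));
	return p;
}

int main(void)
{
	/* On kernels that encode the CPU number in this GDT entry's segment
	 * limit, the low 12 bits are the CPU; treat the output as a demo. */
	printf("plain:    cpu %u\n", getcpu_plain() & 0xfff);
	printf("volatile: cpu %u\n", getcpu_volatile() & 0xfff);
	return 0;
}

Compiling both variants with gcc -O2 and comparing the disassembly shows that only the plain form is eligible for hoisting and CSE, which is how the lsl ended up on the fast path of __vdso_clock_gettime in 3.18.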