3.14-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 13 Jan 2015 23:33:11 +0000 (15:33 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 13 Jan 2015 23:33:11 +0000 (15:33 -0800)
added patches:
x86-vdso-use-asm-volatile-in-__getcpu.patch

queue-3.14/series
queue-3.14/x86-vdso-use-asm-volatile-in-__getcpu.patch [new file with mode: 0644]

diff --git a/queue-3.14/series b/queue-3.14/series
index e7a5c07b01f4df3974058421bfb19034ac42b3bd..b7bffed4e10399a730812ea8b8aac44e6964e5ca 100644
--- a/queue-3.14/series
+++ b/queue-3.14/series
@@ -33,3 +33,4 @@ hid-add-battery-quirk-for-usb_device_id_apple_alu_wireless_2011_iso-keyboard.pat
 hid-add-a-new-id-0x501a-for-genius-mousepen-i608x.patch
 kvm-x86-drop-severity-of-generation-wraparound-message.patch
 x86_64-vdso-fix-the-vdso-address-randomization-algorithm.patch
+x86-vdso-use-asm-volatile-in-__getcpu.patch
diff --git a/queue-3.14/x86-vdso-use-asm-volatile-in-__getcpu.patch b/queue-3.14/x86-vdso-use-asm-volatile-in-__getcpu.patch
new file mode 100644
index 0000000..77e34c9
--- /dev/null
+++ b/queue-3.14/x86-vdso-use-asm-volatile-in-__getcpu.patch
@@ -0,0 +1,62 @@
+From 1ddf0b1b11aa8a90cef6706e935fc31c75c406ba Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@amacapital.net>
+Date: Sun, 21 Dec 2014 08:57:46 -0800
+Subject: x86, vdso: Use asm volatile in __getcpu
+
+From: Andy Lutomirski <luto@amacapital.net>
+
+commit 1ddf0b1b11aa8a90cef6706e935fc31c75c406ba upstream.
+
+In Linux 3.18 and below, GCC hoists the lsl instructions in the
+pvclock code all the way to the beginning of __vdso_clock_gettime,
+slowing the non-paravirt case significantly.  For unknown reasons,
+presumably related to the removal of a branch, the performance issue
+is gone as of
+
+e76b027e6408 ("x86,vdso: Use LSL unconditionally for vgetcpu")
+
+but I don't trust GCC enough to expect the problem to stay fixed.
+
+There should be no correctness issue, because the __getcpu calls in
+__vdso_clock_gettime were never necessary in the first place.
+
+Note to stable maintainers: In 3.18 and below, depending on
+configuration, gcc 4.9.2 generates code like this:
+
+     9c3:       44 0f 03 e8             lsl    %ax,%r13d
+     9c7:       45 89 eb                mov    %r13d,%r11d
+     9ca:       0f 03 d8                lsl    %ax,%ebx
+
+This patch won't apply as is to any released kernel, but I'll send a
+trivial backported version if needed.
+
+[
+ Backported by Andy Lutomirski.  Should apply to all affected
+ versions.  This fixes a functionality bug as well as a performance
+ bug: buggy kernels can infinite loop in __vdso_clock_gettime on
+ affected compilers.  See, for example:
+
+ https://bugzilla.redhat.com/show_bug.cgi?id=1178975
+]
+
+Fixes: 51c19b4f5927 ("x86: vdso: pvclock gettime support")
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Andy Lutomirski <luto@amacapital.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/vsyscall.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
+               native_read_tscp(&p);
+       } else {
+               /* Load per CPU data from GDT */
+-              asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
++              asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+      }
+
+      return p;
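
The compiler rule behind this one-word fix: GCC treats an asm statement that has output operands but no "volatile" qualifier as a pure function of its inputs, so it is free to CSE it, hoist it out of loops, or move it above branches.  The user-space sketch below demonstrates that rule with rdtsc rather than lsl, purely because rdtsc is easy to observe outside the kernel; it illustrates the general behavior, is not the kernel's code, and assumes GCC-style inline asm on x86.

    #include <stdio.h>

    static inline unsigned long long rdtsc_pure(void)
    {
            unsigned int lo, hi;
            /* No volatile: GCC may assume the outputs depend only on the
               (empty) input list, so identical statements can be merged,
               hoisted, or dropped entirely if the results are unused. */
            asm("rdtsc" : "=a" (lo), "=d" (hi));
            return ((unsigned long long)hi << 32) | lo;
    }

    static inline unsigned long long rdtsc_volatile(void)
    {
            unsigned int lo, hi;
            /* volatile: the asm executes every time, where the source
               says it does. */
            asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
            return ((unsigned long long)hi << 32) | lo;
    }

    int main(void)
    {
            unsigned long long a = rdtsc_pure();
            unsigned long long b = rdtsc_pure();
            unsigned long long c = rdtsc_volatile();
            unsigned long long d = rdtsc_volatile();

            /* Built with gcc -O2, "pure" typically prints 0 because the
               two non-volatile asms were folded into a single rdtsc;
               "volatile" prints a nonzero delta. */
            printf("pure: %llu  volatile: %llu\n", b - a, d - c);
            return 0;
    }

The same rule accounts for the functionality bug mentioned in the backport note above.  The pvclock path in __vdso_clock_gettime rereads the CPU number inside a retry loop to detect migration; once GCC hoists the non-volatile lsl out of the loop, nothing those reads return can change between iterations.  A sketch of the pattern, with hypothetical names (getcpu_nonvolatile, pvclock_data_consistent) standing in for the real vclock_gettime.c code:

    unsigned int cpu, cpu1;

    do {
            cpu  = getcpu_nonvolatile();  /* hoisted: computed once, before the loop */
            /* ... read this CPU's paravirt clock data ... */
            cpu1 = getcpu_nonvolatile();  /* CSE'd: reuses the value from above */
    } while (cpu != cpu1 || !pvclock_data_consistent(cpu));

With both reads folded into one, cpu == cpu1 on every iteration, so the exit hinges entirely on the consistency check of per-CPU data that may belong to a CPU the task has long since migrated away from, and the loop can spin forever.  Marking the asm volatile, as this patch does, forces a fresh lsl on every iteration and lets the loop observe the migration.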