4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 14 Aug 2018 10:25:39 +0000 (12:25 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 14 Aug 2018 10:25:39 +0000 (12:25 +0200)
added patches:
kprobes-x86-fix-p-uses-in-error-messages.patch
x86-irqflags-provide-a-declaration-for-native_save_fl.patch
x86-paravirt-fix-spectre-v2-mitigations-for-paravirt-guests.patch
x86-speculation-protect-against-userspace-userspace-spectrersb.patch

queue-4.4/kprobes-x86-fix-p-uses-in-error-messages.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/x86-irqflags-provide-a-declaration-for-native_save_fl.patch [new file with mode: 0644]
queue-4.4/x86-paravirt-fix-spectre-v2-mitigations-for-paravirt-guests.patch [new file with mode: 0644]
queue-4.4/x86-speculation-protect-against-userspace-userspace-spectrersb.patch [new file with mode: 0644]

diff --git a/queue-4.4/kprobes-x86-fix-p-uses-in-error-messages.patch b/queue-4.4/kprobes-x86-fix-p-uses-in-error-messages.patch
new file mode 100644
index 0000000..a4b60ae
--- /dev/null
@@ -0,0 +1,60 @@
+From 0ea063306eecf300fcf06d2f5917474b580f666f Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Sat, 28 Apr 2018 21:37:03 +0900
+Subject: kprobes/x86: Fix %p uses in error messages
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 0ea063306eecf300fcf06d2f5917474b580f666f upstream.
+
+Remove all %p uses in error messages in kprobes/x86.
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: David Howells <dhowells@redhat.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Jon Medhurst <tixy@linaro.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Thomas Richter <tmricht@linux.ibm.com>
+Cc: Tobin C. Harding <me@tobin.cc>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: acme@kernel.org
+Cc: akpm@linux-foundation.org
+Cc: brueckner@linux.vnet.ibm.com
+Cc: linux-arch@vger.kernel.org
+Cc: rostedt@goodmis.org
+Cc: schwidefsky@de.ibm.com
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/lkml/152491902310.9916.13355297638917767319.stgit@devbox
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kprobes/core.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -393,7 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src
+               newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
+               if ((s64) (s32) newdisp != newdisp) {
+                       pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
+-                      pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
+                       return 0;
+               }
+               disp = (u8 *) dest + insn_offset_displacement(&insn);
+@@ -609,8 +608,7 @@ static int reenter_kprobe(struct kprobe
+                * Raise a BUG or we'll continue in an endless reentering loop
+                * and eventually a stack overflow.
+                */
+-              printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
+-                     p->addr);
++              pr_err("Unrecoverable kprobe detected.\n");
+               dump_kprobe(p);
+               BUG();
+       default:
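
A note on the check retained by the first hunk: "(s64) (s32) newdisp != newdisp" tests whether the recomputed displacement still fits the instruction's 32-bit field by round-tripping it through s32. A standalone illustration of that idiom (plain C; the names and example values are ours, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* true iff v survives truncation to 32 bits, i.e. fits in a rel32
     * displacement field -- the same round-trip test the hunk keeps */
    static int fits_s32(int64_t v)
    {
            return (int64_t)(int32_t)v == v;
    }

    int main(void)
    {
            printf("%d\n", fits_s32(INT32_MAX)); /* 1: fits */
            printf("%d\n", fits_s32(1LL << 40)); /* 0: out of range */
            return 0;
    }
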
diff --git a/queue-4.4/series b/queue-4.4/series
index 2f7d76d61ca2722f2b16c59ea7f1247f6e75e2aa..859ff6f5cbe4bd7c25ac932d259ce3e6a7f9e164 100644
@@ -14,3 +14,7 @@ ib-core-make-testing-mr-flags-for-writability-a-static-inline-function.patch
 ib-mlx4-mark-user-mr-as-writable-if-actual-virtual-memory-is-writable.patch
 ib-ocrdma-fix-out-of-bounds-access-to-local-buffer.patch
 arm-dts-imx6sx-fix-irq-for-pcie-bridge.patch
+x86-paravirt-fix-spectre-v2-mitigations-for-paravirt-guests.patch
+x86-speculation-protect-against-userspace-userspace-spectrersb.patch
+kprobes-x86-fix-p-uses-in-error-messages.patch
+x86-irqflags-provide-a-declaration-for-native_save_fl.patch
diff --git a/queue-4.4/x86-irqflags-provide-a-declaration-for-native_save_fl.patch b/queue-4.4/x86-irqflags-provide-a-declaration-for-native_save_fl.patch
new file mode 100644
index 0000000..eca5c53
--- /dev/null
@@ -0,0 +1,53 @@
+From 208cbb32558907f68b3b2a081ca2337ac3744794 Mon Sep 17 00:00:00 2001
+From: Nick Desaulniers <ndesaulniers@google.com>
+Date: Fri, 3 Aug 2018 10:05:50 -0700
+Subject: x86/irqflags: Provide a declaration for native_save_fl
+
+From: Nick Desaulniers <ndesaulniers@google.com>
+
+commit 208cbb32558907f68b3b2a081ca2337ac3744794 upstream.
+
+It was reported that commit d0a8d9378d16 causes users of gcc < 4.9
+to observe -Werror=missing-prototypes errors.
+
+Indeed, it seems that:
+extern inline unsigned long native_save_fl(void) { return 0; }
+
+compiled with -Werror=missing-prototypes produces this warning in gcc <
+4.9, but not gcc >= 4.9.
+
+Fixes: d0a8d9378d16 ("x86/paravirt: Make native_save_fl() extern inline")
+Reported-by: David Laight <david.laight@aculab.com>
+Reported-by: Jean Delvare <jdelvare@suse.de>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: hpa@zytor.com
+Cc: jgross@suse.com
+Cc: kstewart@linuxfoundation.org
+Cc: gregkh@linuxfoundation.org
+Cc: boris.ostrovsky@oracle.com
+Cc: astrachan@google.com
+Cc: mka@chromium.org
+Cc: arnd@arndb.de
+Cc: tstellar@redhat.com
+Cc: sedat.dilek@gmail.com
+Cc: David.Laight@aculab.com
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180803170550.164688-1-ndesaulniers@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/irqflags.h |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -8,6 +8,8 @@
+  * Interrupt control:
+  */
++/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
++extern inline unsigned long native_save_fl(void);
+ extern inline unsigned long native_save_fl(void)
+ {
+       unsigned long flags;
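
The behaviour described in the changelog can be reproduced outside the kernel. A minimal sketch (the file name and compiler flags are illustrative; it assumes gcc's gnu-inline semantics, which the kernel's use of extern inline relies on):

    /* native_fl.c -- with gcc < 4.9:
     *   gcc -Wmissing-prototypes -Werror -c native_fl.c
     * fails unless the forward declaration below is present;
     * gcc >= 4.9 accepts it either way. */

    /* the declaration the patch adds */
    extern inline unsigned long native_save_fl(void);

    extern inline unsigned long native_save_fl(void)
    {
            return 0; /* stand-in body, as quoted in the changelog */
    }
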
diff --git a/queue-4.4/x86-paravirt-fix-spectre-v2-mitigations-for-paravirt-guests.patch b/queue-4.4/x86-paravirt-fix-spectre-v2-mitigations-for-paravirt-guests.patch
new file mode 100644
index 0000000..aef2730
--- /dev/null
@@ -0,0 +1,161 @@
+From 5800dc5c19f34e6e03b5adab1282535cb102fafd Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 3 Aug 2018 16:41:39 +0200
+Subject: x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 5800dc5c19f34e6e03b5adab1282535cb102fafd upstream.
+
+Nadav reported that on guests we're failing to rewrite the indirect
+calls to CALLEE_SAVE paravirt functions. In particular the
+pv_queued_spin_unlock() call is left unpatched and that is all over the
+place. This obviously wrecks Spectre-v2 mitigation (for paravirt
+guests) which relies on not actually having indirect calls around.
+
+The reason is an incorrect clobber test in paravirt_patch_call(); this
+function rewrites an indirect call with a direct call to the _SAME_
+function, so there is no possible way the clobbers can be different.
+
+Therefore remove this clobber check. Also put WARNs on the other patch
+failure case (not enough room for the instruction), which I've not seen
+trigger in my (limited) testing.
+
+Below are three live kernel image disassemblies for lock_sock_nested (a
+small function that illustrates the problem nicely). PRE is the current
+situation for guests, POST is with this patch applied and NATIVE is with
+or without the patch for !guests.
+
+PRE:
+
+(gdb) disassemble lock_sock_nested
+Dump of assembler code for function lock_sock_nested:
+   0xffffffff817be970 <+0>:     push   %rbp
+   0xffffffff817be971 <+1>:     mov    %rdi,%rbp
+   0xffffffff817be974 <+4>:     push   %rbx
+   0xffffffff817be975 <+5>:     lea    0x88(%rbp),%rbx
+   0xffffffff817be97c <+12>:    callq  0xffffffff819f7160 <_cond_resched>
+   0xffffffff817be981 <+17>:    mov    %rbx,%rdi
+   0xffffffff817be984 <+20>:    callq  0xffffffff819fbb00 <_raw_spin_lock_bh>
+   0xffffffff817be989 <+25>:    mov    0x8c(%rbp),%eax
+   0xffffffff817be98f <+31>:    test   %eax,%eax
+   0xffffffff817be991 <+33>:    jne    0xffffffff817be9ba <lock_sock_nested+74>
+   0xffffffff817be993 <+35>:    movl   $0x1,0x8c(%rbp)
+   0xffffffff817be99d <+45>:    mov    %rbx,%rdi
+   0xffffffff817be9a0 <+48>:    callq  *0xffffffff822299e8
+   0xffffffff817be9a7 <+55>:    pop    %rbx
+   0xffffffff817be9a8 <+56>:    pop    %rbp
+   0xffffffff817be9a9 <+57>:    mov    $0x200,%esi
+   0xffffffff817be9ae <+62>:    mov    $0xffffffff817be993,%rdi
+   0xffffffff817be9b5 <+69>:    jmpq   0xffffffff81063ae0 <__local_bh_enable_ip>
+   0xffffffff817be9ba <+74>:    mov    %rbp,%rdi
+   0xffffffff817be9bd <+77>:    callq  0xffffffff817be8c0 <__lock_sock>
+   0xffffffff817be9c2 <+82>:    jmp    0xffffffff817be993 <lock_sock_nested+35>
+End of assembler dump.
+
+POST:
+
+(gdb) disassemble lock_sock_nested
+Dump of assembler code for function lock_sock_nested:
+   0xffffffff817be970 <+0>:     push   %rbp
+   0xffffffff817be971 <+1>:     mov    %rdi,%rbp
+   0xffffffff817be974 <+4>:     push   %rbx
+   0xffffffff817be975 <+5>:     lea    0x88(%rbp),%rbx
+   0xffffffff817be97c <+12>:    callq  0xffffffff819f7160 <_cond_resched>
+   0xffffffff817be981 <+17>:    mov    %rbx,%rdi
+   0xffffffff817be984 <+20>:    callq  0xffffffff819fbb00 <_raw_spin_lock_bh>
+   0xffffffff817be989 <+25>:    mov    0x8c(%rbp),%eax
+   0xffffffff817be98f <+31>:    test   %eax,%eax
+   0xffffffff817be991 <+33>:    jne    0xffffffff817be9ba <lock_sock_nested+74>
+   0xffffffff817be993 <+35>:    movl   $0x1,0x8c(%rbp)
+   0xffffffff817be99d <+45>:    mov    %rbx,%rdi
+   0xffffffff817be9a0 <+48>:    callq  0xffffffff810a0c20 <__raw_callee_save___pv_queued_spin_unlock>
+   0xffffffff817be9a5 <+53>:    xchg   %ax,%ax
+   0xffffffff817be9a7 <+55>:    pop    %rbx
+   0xffffffff817be9a8 <+56>:    pop    %rbp
+   0xffffffff817be9a9 <+57>:    mov    $0x200,%esi
+   0xffffffff817be9ae <+62>:    mov    $0xffffffff817be993,%rdi
+   0xffffffff817be9b5 <+69>:    jmpq   0xffffffff81063aa0 <__local_bh_enable_ip>
+   0xffffffff817be9ba <+74>:    mov    %rbp,%rdi
+   0xffffffff817be9bd <+77>:    callq  0xffffffff817be8c0 <__lock_sock>
+   0xffffffff817be9c2 <+82>:    jmp    0xffffffff817be993 <lock_sock_nested+35>
+End of assembler dump.
+
+NATIVE:
+
+(gdb) disassemble lock_sock_nested
+Dump of assembler code for function lock_sock_nested:
+   0xffffffff817be970 <+0>:     push   %rbp
+   0xffffffff817be971 <+1>:     mov    %rdi,%rbp
+   0xffffffff817be974 <+4>:     push   %rbx
+   0xffffffff817be975 <+5>:     lea    0x88(%rbp),%rbx
+   0xffffffff817be97c <+12>:    callq  0xffffffff819f7160 <_cond_resched>
+   0xffffffff817be981 <+17>:    mov    %rbx,%rdi
+   0xffffffff817be984 <+20>:    callq  0xffffffff819fbb00 <_raw_spin_lock_bh>
+   0xffffffff817be989 <+25>:    mov    0x8c(%rbp),%eax
+   0xffffffff817be98f <+31>:    test   %eax,%eax
+   0xffffffff817be991 <+33>:    jne    0xffffffff817be9ba <lock_sock_nested+74>
+   0xffffffff817be993 <+35>:    movl   $0x1,0x8c(%rbp)
+   0xffffffff817be99d <+45>:    mov    %rbx,%rdi
+   0xffffffff817be9a0 <+48>:    movb   $0x0,(%rdi)
+   0xffffffff817be9a3 <+51>:    nopl   0x0(%rax)
+   0xffffffff817be9a7 <+55>:    pop    %rbx
+   0xffffffff817be9a8 <+56>:    pop    %rbp
+   0xffffffff817be9a9 <+57>:    mov    $0x200,%esi
+   0xffffffff817be9ae <+62>:    mov    $0xffffffff817be993,%rdi
+   0xffffffff817be9b5 <+69>:    jmpq   0xffffffff81063ae0 <__local_bh_enable_ip>
+   0xffffffff817be9ba <+74>:    mov    %rbp,%rdi
+   0xffffffff817be9bd <+77>:    callq  0xffffffff817be8c0 <__lock_sock>
+   0xffffffff817be9c2 <+82>:    jmp    0xffffffff817be993 <lock_sock_nested+35>
+End of assembler dump.
+
+
+Fixes: 63f70270ccd9 ("[PATCH] i386: PARAVIRT: add common patching machinery")
+Fixes: 3010a0663fd9 ("x86/paravirt, objtool: Annotate indirect calls")
+Reported-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/paravirt.c |   14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -97,10 +97,12 @@ unsigned paravirt_patch_call(void *insnb
+       struct branch *b = insnbuf;
+       unsigned long delta = (unsigned long)target - (addr+5);
+-      if (tgt_clobbers & ~site_clobbers)
+-              return len;     /* target would clobber too much for this site */
+-      if (len < 5)
++      if (len < 5) {
++#ifdef CONFIG_RETPOLINE
++              WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
++#endif
+               return len;     /* call too long for patch site */
++      }
+       b->opcode = 0xe8; /* call */
+       b->delta = delta;
+@@ -115,8 +117,12 @@ unsigned paravirt_patch_jmp(void *insnbu
+       struct branch *b = insnbuf;
+       unsigned long delta = (unsigned long)target - (addr+5);
+-      if (len < 5)
++      if (len < 5) {
++#ifdef CONFIG_RETPOLINE
++              WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
++#endif
+               return len;     /* call too long for patch site */
++      }
+       b->opcode = 0xe9;       /* jmp */
+       b->delta = delta;
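
The hunks above lean on the standard x86 near CALL/JMP encoding: a one-byte opcode (0xe8 for CALL, 0xe9 for JMP) followed by a 32-bit displacement measured from the end of the 5-byte instruction, hence delta = target - (addr + 5). A self-contained sketch of that arithmetic (the struct layout mirrors the kernel's struct branch; the helper and its 0-on-failure convention are ours):

    #include <stdint.h>
    #include <string.h>

    /* one-byte opcode plus rel32 operand: 5 bytes total */
    struct __attribute__((packed)) branch {
            uint8_t opcode;  /* 0xe8 = call, 0xe9 = jmp */
            int32_t delta;
    };

    /* Emit 'call target' over the patch site at addr, as
     * paravirt_patch_call() does once the clobber test is gone.
     * Returns bytes written; 0 models the too-short case that
     * now warns under CONFIG_RETPOLINE. */
    static unsigned int patch_call(void *insnbuf, unsigned long addr,
                                   unsigned long target, unsigned int len)
    {
            struct branch b;

            if (len < 5)
                    return 0;
            b.opcode = 0xe8;
            b.delta = (int32_t)(target - (addr + 5));
            memcpy(insnbuf, &b, sizeof(b));
            return 5;
    }
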
diff --git a/queue-4.4/x86-speculation-protect-against-userspace-userspace-spectrersb.patch b/queue-4.4/x86-speculation-protect-against-userspace-userspace-spectrersb.patch
new file mode 100644
index 0000000..94747fa
--- /dev/null
@@ -0,0 +1,92 @@
+From fdf82a7856b32d905c39afc85e34364491e46346 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Thu, 26 Jul 2018 13:14:55 +0200
+Subject: x86/speculation: Protect against userspace-userspace spectreRSB
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+commit fdf82a7856b32d905c39afc85e34364491e46346 upstream.
+
+The article "Spectre Returns! Speculation Attacks using the Return Stack
+Buffer" [1] describes two new (sub-)variants of spectrev2-like attacks,
+making use solely of the RSB contents, even on CPUs that don't fall back
+to the BTB on RSB underflow (Skylake+).
+
+Mitigate userspace-userspace attacks by unconditionally filling the RSB on
+context switch when the generic spectrev2 mitigation has been enabled.
+
+[1] https://arxiv.org/pdf/1807.07940.pdf
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1807261308190.997@cbobk.fhfr.pm
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c |   38 +++++++-------------------------------
+ 1 file changed, 7 insertions(+), 31 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -309,23 +309,6 @@ static enum spectre_v2_mitigation_cmd __
+       return cmd;
+ }
+-/* Check for Skylake-like CPUs (for RSB handling) */
+-static bool __init is_skylake_era(void)
+-{
+-      if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+-          boot_cpu_data.x86 == 6) {
+-              switch (boot_cpu_data.x86_model) {
+-              case INTEL_FAM6_SKYLAKE_MOBILE:
+-              case INTEL_FAM6_SKYLAKE_DESKTOP:
+-              case INTEL_FAM6_SKYLAKE_X:
+-              case INTEL_FAM6_KABYLAKE_MOBILE:
+-              case INTEL_FAM6_KABYLAKE_DESKTOP:
+-                      return true;
+-              }
+-      }
+-      return false;
+-}
+-
+ static void __init spectre_v2_select_mitigation(void)
+ {
+       enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -386,22 +369,15 @@ retpoline_auto:
+       pr_info("%s\n", spectre_v2_strings[mode]);
+       /*
+-       * If neither SMEP nor PTI are available, there is a risk of
+-       * hitting userspace addresses in the RSB after a context switch
+-       * from a shallow call stack to a deeper one. To prevent this fill
+-       * the entire RSB, even when using IBRS.
++       * If spectre v2 protection has been enabled, unconditionally fill
++       * RSB during a context switch; this protects against two independent
++       * issues:
+        *
+-       * Skylake era CPUs have a separate issue with *underflow* of the
+-       * RSB, when they will predict 'ret' targets from the generic BTB.
+-       * The proper mitigation for this is IBRS. If IBRS is not supported
+-       * or deactivated in favour of retpolines the RSB fill on context
+-       * switch is required.
++       *      - RSB underflow (and switch to BTB) on Skylake+
++       *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+        */
+-      if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
+-           !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+-              setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+-              pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
+-      }
++      setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
++      pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+       /* Initialize Indirect Branch Prediction Barrier if supported */
+       if (boot_cpu_has(X86_FEATURE_IBPB)) {
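
For context on what "filling the RSB" means here: the Return Stack Buffer is a small hardware stack of predicted return targets, pushed by CALL and consumed by RET; the kernel's fill sequence overwrites 32 entries. A toy C model of the effect (the real implementation is the asm sequence behind X86_FEATURE_RSB_CTXSW in the kernel's nospec-branch.h; everything below is illustrative):

    #define RSB_DEPTH 32 /* entries the kernel's fill sequence overwrites */

    static unsigned long rsb[RSB_DEPTH]; /* predicted return targets */
    static unsigned int rsb_top;

    static void rsb_push(unsigned long ret_target) /* effect of CALL */
    {
            rsb[rsb_top++ % RSB_DEPTH] = ret_target;
    }

    /* What this patch makes unconditional on context switch: one push
     * per entry, each with a benign target, so nothing the outgoing
     * task planted can steer a later speculative RET in the new task. */
    static void rsb_fill(unsigned long safe_target)
    {
            for (unsigned int i = 0; i < RSB_DEPTH; i++)
                    rsb_push(safe_target);
    }
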