4.4-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 22 Jan 2018 07:46:52 +0000 (08:46 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 22 Jan 2018 07:46:52 +0000 (08:46 +0100)
added patches:
kprobes-x86-blacklist-indirect-thunk-functions-for-kprobes.patch
kprobes-x86-disable-optimizing-on-the-function-jumps-to-indirect-thunk.patch
retpoline-introduce-start-end-markers-of-indirect-thunk.patch
x86-mce-make-machine-check-speculation-protected.patch
x86-pti-document-fix-wrong-index.patch
x86-retpoline-optimize-inline-assembler-for-vmexit_fill_rsb.patch

queue-4.4/kprobes-x86-blacklist-indirect-thunk-functions-for-kprobes.patch [new file with mode: 0644]
queue-4.4/kprobes-x86-disable-optimizing-on-the-function-jumps-to-indirect-thunk.patch [new file with mode: 0644]
queue-4.4/retpoline-introduce-start-end-markers-of-indirect-thunk.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/x86-mce-make-machine-check-speculation-protected.patch [new file with mode: 0644]
queue-4.4/x86-pti-document-fix-wrong-index.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-optimize-inline-assembler-for-vmexit_fill_rsb.patch [new file with mode: 0644]

diff --git a/queue-4.4/kprobes-x86-blacklist-indirect-thunk-functions-for-kprobes.patch b/queue-4.4/kprobes-x86-blacklist-indirect-thunk-functions-for-kprobes.patch
new file mode 100644 (file)
index 0000000..a552f71
--- /dev/null
@@ -0,0 +1,40 @@
+From c1804a236894ecc942da7dc6c5abe209e56cba93 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 19 Jan 2018 01:14:51 +0900
+Subject: kprobes/x86: Blacklist indirect thunk functions for kprobes
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit c1804a236894ecc942da7dc6c5abe209e56cba93 upstream.
+
+Blacklist the __x86_indirect_thunk_* functions for kprobes, because
+those functions can be called from anywhere in the kernel, including
+from functions that are themselves on the kprobes blacklist.
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Link: https://lkml.kernel.org/r/151629209111.10241.5444852823378068683.stgit@devbox
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/lib/retpoline.S |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
+  * than one per register with the correct names. So we do it
+  * the simple and nasty way...
+  */
+-#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
++#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
++#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
+ #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
+ GENERATE_THUNK(_ASM_AX)
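
Blacklisting matters here because the kprobes registration path refuses any probe whose address falls inside a blacklisted symbol, and the thunks can be reached from code that is itself blacklisted. A minimal sketch of that kind of address-range check follows; the struct and helper names are hypothetical and not taken from this patch.

/* Hypothetical blacklist entry covering one [start, end) code range;
 * not taken from this patch. */
struct blacklist_range {
	unsigned long start;
	unsigned long end;
};

/* A probe request is refused when its address lands inside the range. */
static int probe_addr_is_blacklisted(const struct blacklist_range *r,
				     unsigned long probe_addr)
{
	return probe_addr >= r->start && probe_addr < r->end;
}
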
diff --git a/queue-4.4/kprobes-x86-disable-optimizing-on-the-function-jumps-to-indirect-thunk.patch b/queue-4.4/kprobes-x86-disable-optimizing-on-the-function-jumps-to-indirect-thunk.patch
new file mode 100644 (file)
index 0000000..96d8a8b
--- /dev/null
@@ -0,0 +1,80 @@
+From c86a32c09f8ced67971a2310e3b0dda4d1749007 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 19 Jan 2018 01:15:20 +0900
+Subject: kprobes/x86: Disable optimizing on the function jumps to indirect thunk
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit c86a32c09f8ced67971a2310e3b0dda4d1749007 upstream.
+
+Since indirect jump instructions will be replaced by jumps
+to __x86_indirect_thunk_*, those jmp instructions must be
+treated as indirect jumps. Since optprobe prohibits
+optimizing probes in functions that use an indirect jump,
+it also needs to detect functions that jump to
+__x86_indirect_thunk_* and disable optimization for them.
+
+Add a check that the jump target address lies between
+__indirect_thunk_start/end when optimizing a kprobe.
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Link: https://lkml.kernel.org/r/151629212062.10241.6991266100233002273.stgit@devbox
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/kprobes/opt.c |   23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -36,6 +36,7 @@
+ #include <asm/alternative.h>
+ #include <asm/insn.h>
+ #include <asm/debugreg.h>
++#include <asm/nospec-branch.h>
+ #include "common.h"
+@@ -191,7 +192,7 @@ static int copy_optimized_instructions(u
+ }
+ /* Check whether insn is indirect jump */
+-static int insn_is_indirect_jump(struct insn *insn)
++static int __insn_is_indirect_jump(struct insn *insn)
+ {
+       return ((insn->opcode.bytes[0] == 0xff &&
+               (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
+@@ -225,6 +226,26 @@ static int insn_jump_into_range(struct i
+       return (start <= target && target <= start + len);
+ }
++static int insn_is_indirect_jump(struct insn *insn)
++{
++      int ret = __insn_is_indirect_jump(insn);
++
++#ifdef CONFIG_RETPOLINE
++      /*
++       * Jump to x86_indirect_thunk_* is treated as an indirect jump.
++       * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
++       * older gcc may use indirect jump. So we add this check instead of
++       * replace indirect-jump check.
++       */
++      if (!ret)
++              ret = insn_jump_into_range(insn,
++                              (unsigned long)__indirect_thunk_start,
++                              (unsigned long)__indirect_thunk_end -
++                              (unsigned long)__indirect_thunk_start);
++#endif
++      return ret;
++}
++
+ /* Decode whole function to ensure any instructions don't jump into target */
+ static int can_optimize(unsigned long paddr)
+ {
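
The range check added above compares the jump's target address against the thunk section; for a relative jmp that target is the address just past the instruction plus the signed displacement carried in its immediate. A standalone, purely illustrative sketch of that computation:

/* Illustrative only: the target of a relative jmp is the address just
 * past the instruction plus the signed displacement encoded in its
 * immediate operand. */
static unsigned long relative_jump_target(unsigned long insn_addr,
					  unsigned int insn_len,
					  long displacement)
{
	return insn_addr + insn_len + displacement;
}
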
diff --git a/queue-4.4/retpoline-introduce-start-end-markers-of-indirect-thunk.patch b/queue-4.4/retpoline-introduce-start-end-markers-of-indirect-thunk.patch
new file mode 100644 (file)
index 0000000..6aca881
--- /dev/null
@@ -0,0 +1,71 @@
+From 736e80a4213e9bbce40a7c050337047128b472ac Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 19 Jan 2018 01:14:21 +0900
+Subject: retpoline: Introduce start/end markers of indirect thunk
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 736e80a4213e9bbce40a7c050337047128b472ac upstream.
+
+Introduce start/end markers for the __x86_indirect_thunk_* functions.
+To make this easy, consolidate the .text.__x86.indirect_thunk.* sections
+into a single .text.__x86.indirect_thunk section, place it at the
+end of the kernel text section, and add __indirect_thunk_start/end
+so that other subsystems (e.g. kprobes) can identify it.
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Link: https://lkml.kernel.org/r/151629206178.10241.6828804696410044771.stgit@devbox
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/nospec-branch.h |    3 +++
+ arch/x86/kernel/vmlinux.lds.S        |    7 +++++++
+ arch/x86/lib/retpoline.S             |    2 +-
+ 3 files changed, 11 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -171,6 +171,9 @@ enum spectre_v2_mitigation {
+       SPECTRE_V2_IBRS,
+ };
++extern char __indirect_thunk_start[];
++extern char __indirect_thunk_end[];
++
+ /*
+  * On VMEXIT we must ensure that no RSB predictions learned in the guest
+  * can be followed in the host, by overwriting the RSB completely. Both
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -104,6 +104,13 @@ SECTIONS
+               IRQENTRY_TEXT
+               *(.fixup)
+               *(.gnu.warning)
++
++#ifdef CONFIG_RETPOLINE
++              __indirect_thunk_start = .;
++              *(.text.__x86.indirect_thunk)
++              __indirect_thunk_end = .;
++#endif
++
+               /* End of text section */
+               _etext = .;
+       } :text = 0x9090
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -9,7 +9,7 @@
+ #include <asm/nospec-branch.h>
+ .macro THUNK reg
+-      .section .text.__x86.indirect_thunk.\reg
++      .section .text.__x86.indirect_thunk
+ ENTRY(__x86_indirect_thunk_\reg)
+       CFI_STARTPROC
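
With the thunks consolidated into one bracketed section, a consumer only needs the two linker-provided symbols to decide whether an address points into thunk code. A hedged sketch of such a consumer follows; the helper name is hypothetical, and the kprobes patches in this queue perform the equivalent check on jump targets.

/* Markers provided by the linker script hunk above. */
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/* Hypothetical helper: does addr fall inside an indirect-thunk body? */
static int addr_in_indirect_thunk(unsigned long addr)
{
	return addr >= (unsigned long)__indirect_thunk_start &&
	       addr <  (unsigned long)__indirect_thunk_end;
}
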
diff --git a/queue-4.4/series b/queue-4.4/series
index 8f5bb6e8d69c5545b62bbf473084cb14f385ac8b..afe4d3134927f3c5ece26e652f64a586146ac913 100644 (file)
--- a/queue-4.4/series
@@ -45,3 +45,9 @@ dm-thin-metadata-thin_max_concurrent_locks-should-be-6.patch
 arm64-kvm-fix-smccc-handling-of-unimplemented-smc-hvc-calls.patch
 x86-cpu-x86-pti-do-not-enable-pti-on-amd-processors.patch
 kbuild-modversions-for-export_symbol-for-asm.patch
+x86-mce-make-machine-check-speculation-protected.patch
+retpoline-introduce-start-end-markers-of-indirect-thunk.patch
+kprobes-x86-blacklist-indirect-thunk-functions-for-kprobes.patch
+kprobes-x86-disable-optimizing-on-the-function-jumps-to-indirect-thunk.patch
+x86-pti-document-fix-wrong-index.patch
+x86-retpoline-optimize-inline-assembler-for-vmexit_fill_rsb.patch
diff --git a/queue-4.4/x86-mce-make-machine-check-speculation-protected.patch b/queue-4.4/x86-mce-make-machine-check-speculation-protected.patch
new file mode 100644 (file)
index 0000000..697d2b0
--- /dev/null
@@ -0,0 +1,63 @@
+From 6f41c34d69eb005e7848716bbcafc979b35037d5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 18 Jan 2018 16:28:26 +0100
+Subject: x86/mce: Make machine check speculation protected
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 6f41c34d69eb005e7848716bbcafc979b35037d5 upstream.
+
+The machine check idtentry uses an indirect branch directly from the low
+level code. This evades the speculation protection.
+
+Replace it by a direct call into C code and issue the indirect call there
+so the compiler can apply the proper speculation protection.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@alien8.de>
+Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
+Niced-by: Peter Zijlstra <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801181626290.1847@nanos
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/entry_64.S        |    2 +-
+ arch/x86/include/asm/traps.h     |    1 +
+ arch/x86/kernel/cpu/mcheck/mce.c |    5 +++++
+ 3 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -1031,7 +1031,7 @@ idtentry async_page_fault        do_async_page_
+ #endif
+ #ifdef CONFIG_X86_MCE
+-idtentry machine_check                                        has_error_code=0        paranoid=1 do_sym=*machine_check_vector(%rip)
++idtentry machine_check                do_mce                  has_error_code=0        paranoid=1
+ #endif
+ /*
+--- a/arch/x86/include/asm/traps.h
++++ b/arch/x86/include/asm/traps.h
+@@ -92,6 +92,7 @@ dotraplinkage void do_simd_coprocessor_e
+ #ifdef CONFIG_X86_32
+ dotraplinkage void do_iret_error(struct pt_regs *, long);
+ #endif
++dotraplinkage void do_mce(struct pt_regs *, long);
+ static inline int get_si_code(unsigned long condition)
+ {
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -1672,6 +1672,11 @@ static void unexpected_machine_check(str
+ void (*machine_check_vector)(struct pt_regs *, long error_code) =
+                                               unexpected_machine_check;
++dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
++{
++      machine_check_vector(regs, error_code);
++}
++
+ /*
+  * Called for each booted CPU to set up machine checks.
+  * Must be called with preempt off:
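
The shape of the fix: instead of the idtentry macro dispatching through a function pointer in assembly, the entry now lands in a plain C function that performs the indirect call, so the compiler can emit it with speculation protection. A stripped-down sketch of the pattern, using hypothetical names rather than the mce code itself:

/* Hypothetical names, not the mce code itself: the indirect call is
 * issued from C, where the compiler's retpoline transformation applies,
 * rather than from hand-written entry assembly. */
typedef void (*mc_handler_t)(long error_code);

static void mc_unexpected(long error_code)
{
	/* default handler until a real one is installed */
}

static mc_handler_t mc_handler = mc_unexpected;

/* Direct call target for the entry code. */
void mc_entry(long error_code)
{
	mc_handler(error_code);	/* indirect call emitted by the compiler */
}
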
diff --git a/queue-4.4/x86-pti-document-fix-wrong-index.patch b/queue-4.4/x86-pti-document-fix-wrong-index.patch
new file mode 100644 (file)
index 0000000..cb7b93a
--- /dev/null
@@ -0,0 +1,32 @@
+From 98f0fceec7f84d80bc053e49e596088573086421 Mon Sep 17 00:00:00 2001
+From: "zhenwei.pi" <zhenwei.pi@youruncloud.com>
+Date: Thu, 18 Jan 2018 09:04:52 +0800
+Subject: x86/pti: Document fix wrong index
+
+From: zhenwei.pi <zhenwei.pi@youruncloud.com>
+
+commit 98f0fceec7f84d80bc053e49e596088573086421 upstream.
+
+In section <2. Runtime Cost>, fix a wrong list index.
+
+Signed-off-by: zhenwei.pi <zhenwei.pi@youruncloud.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: dave.hansen@linux.intel.com
+Link: https://lkml.kernel.org/r/1516237492-27739-1-git-send-email-zhenwei.pi@youruncloud.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/x86/pti.txt |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/x86/pti.txt
++++ b/Documentation/x86/pti.txt
+@@ -78,7 +78,7 @@ this protection comes at a cost:
+      non-PTI SYSCALL entry code, so requires mapping fewer
+      things into the userspace page tables.  The downside is
+      that stacks must be switched at entry time.
+-  d. Global pages are disabled for all kernel structures not
++  c. Global pages are disabled for all kernel structures not
+      mapped into both kernel and userspace page tables.  This
+      feature of the MMU allows different processes to share TLB
+      entries mapping the kernel.  Losing the feature means more
diff --git a/queue-4.4/x86-retpoline-optimize-inline-assembler-for-vmexit_fill_rsb.patch b/queue-4.4/x86-retpoline-optimize-inline-assembler-for-vmexit_fill_rsb.patch
new file mode 100644 (file)
index 0000000..b1ee18f
--- /dev/null
@@ -0,0 +1,57 @@
+From 3f7d875566d8e79c5e0b2c9a413e91b2c29e0854 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Wed, 17 Jan 2018 14:53:28 -0800
+Subject: x86/retpoline: Optimize inline assembler for vmexit_fill_RSB
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 3f7d875566d8e79c5e0b2c9a413e91b2c29e0854 upstream.
+
+The generated assembler for the C fill RSB inline asm operations has
+several issues:
+
+- The C code sets up the loop register, which is then immediately
+  overwritten in __FILL_RETURN_BUFFER with the same value again.
+
+- The C code also passes in the iteration count in another register, which
+  is not used at all.
+
+Remove these two unnecessary operations. Just rely on the single constant
+passed to the macro for the iterations.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: dave.hansen@intel.com
+Cc: gregkh@linuxfoundation.org
+Cc: torvalds@linux-foundation.org
+Cc: arjan@linux.intel.com
+Link: https://lkml.kernel.org/r/20180117225328.15414-1-andi@firstfloor.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/nospec-branch.h |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -183,15 +183,16 @@ extern char __indirect_thunk_end[];
+ static inline void vmexit_fill_RSB(void)
+ {
+ #ifdef CONFIG_RETPOLINE
+-      unsigned long loops = RSB_CLEAR_LOOPS / 2;
++      unsigned long loops;
+       asm volatile (ALTERNATIVE("jmp 910f",
+                                 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+                                 X86_FEATURE_RETPOLINE)
+                     "910:"
+-                    : "=&r" (loops), ASM_CALL_CONSTRAINT
+-                    : "r" (loops) : "memory" );
++                    : "=r" (loops), ASM_CALL_CONSTRAINT
++                    : : "memory" );
+ #endif
+ }
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __NOSPEC_BRANCH_H__ */
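
The constraint change above removes a redundant input operand: the asm body initializes the loop register itself, so an output-only operand is sufficient and nothing needs to be passed in from C. A standalone illustration of the same pattern, assuming GCC-style inline asm on x86 (not the kernel macro):

/* Standalone illustration, not the kernel macro: the asm body sets up
 * and consumes its own scratch register, so the operand is declared
 * output-only ("=r") and no initial value is passed in from C. */
static inline unsigned long countdown_sketch(void)
{
	unsigned long scratch;			/* written first by the asm */

	asm volatile("mov $16, %0\n\t"		/* asm provides its own counter */
		     "1:\tdec %0\n\t"
		     "jnz 1b"
		     : "=r" (scratch)
		     :
		     : "cc");
	return scratch;
}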