git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 Jan 2018 10:56:26 +0000 (11:56 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 17 Jan 2018 10:56:26 +0000 (11:56 +0100)
added patches:
x86-retpoline-add-initial-retpoline-support.patch
x86-retpoline-checksum32-convert-assembler-indirect-jumps.patch
x86-retpoline-crypto-convert-crypto-assembler-indirect-jumps.patch
x86-retpoline-entry-convert-entry-assembler-indirect-jumps.patch
x86-retpoline-fill-return-stack-buffer-on-vmexit.patch
x86-retpoline-ftrace-convert-ftrace-assembler-indirect-jumps.patch
x86-retpoline-hyperv-convert-assembler-indirect-jumps.patch
x86-retpoline-irq32-convert-assembler-indirect-jumps.patch
x86-retpoline-remove-compile-time-warning.patch
x86-retpoline-xen-convert-xen-hypercall-indirect-jumps.patch
x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch

12 files changed:
queue-4.4/series
queue-4.4/x86-retpoline-add-initial-retpoline-support.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-checksum32-convert-assembler-indirect-jumps.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-crypto-convert-crypto-assembler-indirect-jumps.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-entry-convert-entry-assembler-indirect-jumps.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-fill-return-stack-buffer-on-vmexit.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-ftrace-convert-ftrace-assembler-indirect-jumps.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-hyperv-convert-assembler-indirect-jumps.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-irq32-convert-assembler-indirect-jumps.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-remove-compile-time-warning.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-xen-convert-xen-hypercall-indirect-jumps.patch [new file with mode: 0644]
queue-4.4/x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch [new file with mode: 0644]

diff --git a/queue-4.4/series b/queue-4.4/series
index 56ee1318de536dd634d0f5fabf41db9f88c2c045..9d0e082dfa80dc904bac646beca3361c586ee7bf 100644
@@ -7,3 +7,14 @@ x86-kbuild-enable-modversions-for-symbols-exported-from-asm.patch
 x86-asm-make-asm-alternative.h-safe-from-assembly.patch
 export_symbol-for-asm.patch
 kconfig.h-use-__is_defined-to-check-if-module-is-defined.patch
+x86-retpoline-add-initial-retpoline-support.patch
+x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch
+x86-retpoline-crypto-convert-crypto-assembler-indirect-jumps.patch
+x86-retpoline-entry-convert-entry-assembler-indirect-jumps.patch
+x86-retpoline-ftrace-convert-ftrace-assembler-indirect-jumps.patch
+x86-retpoline-hyperv-convert-assembler-indirect-jumps.patch
+x86-retpoline-xen-convert-xen-hypercall-indirect-jumps.patch
+x86-retpoline-checksum32-convert-assembler-indirect-jumps.patch
+x86-retpoline-irq32-convert-assembler-indirect-jumps.patch
+x86-retpoline-fill-return-stack-buffer-on-vmexit.patch
+x86-retpoline-remove-compile-time-warning.patch
diff --git a/queue-4.4/x86-retpoline-add-initial-retpoline-support.patch b/queue-4.4/x86-retpoline-add-initial-retpoline-support.patch
new file mode 100644
index 0000000..f5e8b43
--- /dev/null
@@ -0,0 +1,338 @@
+From 76b043848fd22dbf7f8bf3a1452f8c70d557b860 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 11 Jan 2018 21:46:25 +0000
+Subject: x86/retpoline: Add initial retpoline support
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 76b043848fd22dbf7f8bf3a1452f8c70d557b860 upstream.
+
+Enable the use of -mindirect-branch=thunk-extern in newer GCC, and provide
+the corresponding thunks. Provide assembler macros for invoking the thunks
+in the same way that GCC does, from native and inline assembler.
+
+This adds X86_FEATURE_RETPOLINE and sets it by default on all CPUs. In
+some circumstances, IBRS microcode features may be used instead, and the
+retpoline can be disabled.
+
+On AMD CPUs if lfence is serialising, the retpoline can be dramatically
+simplified to a simple "lfence; jmp *\reg". A future patch, after it has
+been verified that lfence really is serialising in all circumstances, can
+enable this by setting the X86_FEATURE_RETPOLINE_AMD feature bit in addition
+to X86_FEATURE_RETPOLINE.
+
+Do not align the retpoline in the altinstr section, because there is no
+guarantee that it stays aligned when it's copied over the oldinstr during
+alternative patching.
+
+[ Andi Kleen: Rename the macros, add CONFIG_RETPOLINE option, export thunks]
+[ tglx: Put actual function CALL/JMP in front of the macros, convert to
+       symbolic labels ]
+[ dwmw2: Convert back to numeric labels, merge objtool fixes ]
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-4-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+[ 4.4 backport: removed objtool annotation since there is no objtool ]
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig                      |   13 ++++
+ arch/x86/Makefile                     |   10 +++
+ arch/x86/include/asm/asm-prototypes.h |   25 ++++++++
+ arch/x86/include/asm/cpufeature.h     |    2 
+ arch/x86/include/asm/nospec-branch.h  |  106 ++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c          |    4 +
+ arch/x86/lib/Makefile                 |    1 
+ arch/x86/lib/retpoline.S              |   48 +++++++++++++++
+ 8 files changed, 209 insertions(+)
+ create mode 100644 arch/x86/include/asm/nospec-branch.h
+ create mode 100644 arch/x86/lib/retpoline.S
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -379,6 +379,19 @@ config GOLDFISH
+        def_bool y
+        depends on X86_GOLDFISH
++config RETPOLINE
++      bool "Avoid speculative indirect branches in kernel"
++      default y
++      ---help---
++        Compile kernel with the retpoline compiler options to guard against
++        kernel-to-user data leaks by avoiding speculative indirect
++        branches. Requires a compiler with -mindirect-branch=thunk-extern
++        support for full protection. The kernel may run slower.
++
++        Without compiler support, at least indirect branches in assembler
++        code are eliminated. Since this includes the syscall entry path,
++        it is not entirely pointless.
++
+ if X86_32
+ config X86_EXTENDED_PLATFORM
+       bool "Support for extended (non-PC) x86 platforms"
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -189,6 +189,16 @@ KBUILD_CFLAGS += -fno-asynchronous-unwin
+ KBUILD_CFLAGS += $(mflags-y)
+ KBUILD_AFLAGS += $(mflags-y)
++# Avoid indirect branches in kernel to deal with Spectre
++ifdef CONFIG_RETPOLINE
++    RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
++    ifneq ($(RETPOLINE_CFLAGS),)
++        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
++    else
++        $(warning CONFIG_RETPOLINE=y, but not supported by the compiler. Toolchain update recommended.)
++    endif
++endif
++
+ archscripts: scripts_basic
+       $(Q)$(MAKE) $(build)=arch/x86/tools relocs
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -10,7 +10,32 @@
+ #include <asm/pgtable.h>
+ #include <asm/special_insns.h>
+ #include <asm/preempt.h>
++#include <asm/asm.h>
+ #ifndef CONFIG_X86_CMPXCHG64
+ extern void cmpxchg8b_emu(void);
+ #endif
++
++#ifdef CONFIG_RETPOLINE
++#ifdef CONFIG_X86_32
++#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
++#else
++#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
++INDIRECT_THUNK(8)
++INDIRECT_THUNK(9)
++INDIRECT_THUNK(10)
++INDIRECT_THUNK(11)
++INDIRECT_THUNK(12)
++INDIRECT_THUNK(13)
++INDIRECT_THUNK(14)
++INDIRECT_THUNK(15)
++#endif
++INDIRECT_THUNK(ax)
++INDIRECT_THUNK(bx)
++INDIRECT_THUNK(cx)
++INDIRECT_THUNK(dx)
++INDIRECT_THUNK(si)
++INDIRECT_THUNK(di)
++INDIRECT_THUNK(bp)
++INDIRECT_THUNK(sp)
++#endif /* CONFIG_RETPOLINE */
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -200,6 +200,8 @@
+ #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
+ #define X86_FEATURE_INTEL_PT  ( 7*32+15) /* Intel Processor Trace */
++#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
+ /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+ #define X86_FEATURE_KAISER    ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
+--- /dev/null
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -0,0 +1,106 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#ifndef __NOSPEC_BRANCH_H__
++#define __NOSPEC_BRANCH_H__
++
++#include <asm/alternative.h>
++#include <asm/alternative-asm.h>
++#include <asm/cpufeature.h>
++
++#ifdef __ASSEMBLY__
++
++/*
++ * These are the bare retpoline primitives for indirect jmp and call.
++ * Do not use these directly; they only exist to make the ALTERNATIVE
++ * invocation below less ugly.
++ */
++.macro RETPOLINE_JMP reg:req
++      call    .Ldo_rop_\@
++.Lspec_trap_\@:
++      pause
++      jmp     .Lspec_trap_\@
++.Ldo_rop_\@:
++      mov     \reg, (%_ASM_SP)
++      ret
++.endm
++
++/*
++ * This is a wrapper around RETPOLINE_JMP so the called function in reg
++ * returns to the instruction after the macro.
++ */
++.macro RETPOLINE_CALL reg:req
++      jmp     .Ldo_call_\@
++.Ldo_retpoline_jmp_\@:
++      RETPOLINE_JMP \reg
++.Ldo_call_\@:
++      call    .Ldo_retpoline_jmp_\@
++.endm
++
++/*
++ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
++ * indirect jmp/call which may be susceptible to the Spectre variant 2
++ * attack.
++ */
++.macro JMP_NOSPEC reg:req
++#ifdef CONFIG_RETPOLINE
++      ALTERNATIVE_2 __stringify(jmp *\reg),                           \
++              __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
++              __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
++#else
++      jmp     *\reg
++#endif
++.endm
++
++.macro CALL_NOSPEC reg:req
++#ifdef CONFIG_RETPOLINE
++      ALTERNATIVE_2 __stringify(call *\reg),                          \
++              __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
++              __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
++#else
++      call    *\reg
++#endif
++.endm
++
++#else /* __ASSEMBLY__ */
++
++#if defined(CONFIG_X86_64) && defined(RETPOLINE)
++
++/*
++ * Since the inline asm uses the %V modifier which is only in newer GCC,
++ * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
++ */
++# define CALL_NOSPEC                                          \
++      ALTERNATIVE(                                            \
++      "call *%[thunk_target]\n",                              \
++      "call __x86_indirect_thunk_%V[thunk_target]\n",         \
++      X86_FEATURE_RETPOLINE)
++# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
++
++#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
++/*
++ * For i386 we use the original ret-equivalent retpoline, because
++ * otherwise we'll run out of registers. We don't care about CET
++ * here, anyway.
++ */
++# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",   \
++      "       jmp    904f;\n"                                 \
++      "       .align 16\n"                                    \
++      "901:   call   903f;\n"                                 \
++      "902:   pause;\n"                                       \
++      "       jmp    902b;\n"                                 \
++      "       .align 16\n"                                    \
++      "903:   addl   $4, %%esp;\n"                            \
++      "       pushl  %[thunk_target];\n"                      \
++      "       ret;\n"                                         \
++      "       .align 16\n"                                    \
++      "904:   call   901b;\n",                                \
++      X86_FEATURE_RETPOLINE)
++
++# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
++#else /* No retpoline */
++# define CALL_NOSPEC "call *%[thunk_target]\n"
++# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
++#endif
++
++#endif /* __ASSEMBLY__ */
++#endif /* __NOSPEC_BRANCH_H__ */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -837,6 +837,10 @@ static void __init early_identify_cpu(st
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++#ifdef CONFIG_RETPOLINE
++      setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
++#endif
++
+       fpu__init_system(c);
+ #ifdef CONFIG_X86_32
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -21,6 +21,7 @@ lib-y += usercopy_$(BITS).o usercopy.o g
+ lib-y += memcpy_$(BITS).o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
++lib-$(CONFIG_RETPOLINE) += retpoline.o
+ obj-y += msr.o msr-reg.o msr-reg-export.o
+--- /dev/null
++++ b/arch/x86/lib/retpoline.S
+@@ -0,0 +1,48 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#include <linux/stringify.h>
++#include <linux/linkage.h>
++#include <asm/dwarf2.h>
++#include <asm/cpufeature.h>
++#include <asm/alternative-asm.h>
++#include <asm-generic/export.h>
++#include <asm/nospec-branch.h>
++
++.macro THUNK reg
++      .section .text.__x86.indirect_thunk.\reg
++
++ENTRY(__x86_indirect_thunk_\reg)
++      CFI_STARTPROC
++      JMP_NOSPEC %\reg
++      CFI_ENDPROC
++ENDPROC(__x86_indirect_thunk_\reg)
++.endm
++
++/*
++ * Despite being an assembler file we can't just use .irp here
++ * because __KSYM_DEPS__ only uses the C preprocessor and would
++ * only see one instance of "__x86_indirect_thunk_\reg" rather
++ * than one per register with the correct names. So we do it
++ * the simple and nasty way...
++ */
++#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
++#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
++
++GENERATE_THUNK(_ASM_AX)
++GENERATE_THUNK(_ASM_BX)
++GENERATE_THUNK(_ASM_CX)
++GENERATE_THUNK(_ASM_DX)
++GENERATE_THUNK(_ASM_SI)
++GENERATE_THUNK(_ASM_DI)
++GENERATE_THUNK(_ASM_BP)
++GENERATE_THUNK(_ASM_SP)
++#ifdef CONFIG_64BIT
++GENERATE_THUNK(r8)
++GENERATE_THUNK(r9)
++GENERATE_THUNK(r10)
++GENERATE_THUNK(r11)
++GENERATE_THUNK(r12)
++GENERATE_THUNK(r13)
++GENERATE_THUNK(r14)
++GENERATE_THUNK(r15)
++#endif
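
The thunk this patch adds can be exercised outside the kernel. Below is a minimal userspace sketch, assuming x86-64 Linux with GCC or Clang (build with -O2 -mno-red-zone, since making a call from inline asm bypasses the compiler's red-zone bookkeeping); my_thunk_rax and call_nospec are illustrative names, not the kernel's symbols:

#include <stdio.h>

/* Same shape as the patch's __x86_indirect_thunk_rax: the 'call' pushes
 * the address of the speculation trap, the pause/jmp loop captures any
 * speculative 'ret', and the architectural path overwrites the return
 * address on the stack so 'ret' lands on the real target in %rax. */
asm(".text\n"
    ".globl my_thunk_rax\n"
    "my_thunk_rax:\n"
    "   call 2f\n"             /* push the trap address as return addr */
    "1: pause\n"
    "   jmp  1b\n"             /* speculative execution spins here */
    "2: mov  %rax, (%rsp)\n"   /* replace return address with target */
    "   ret\n");               /* 'return' jumps to *%rax */

static void target(void) { puts("reached via retpoline thunk"); }

/* Rough analogue of CALL_NOSPEC with the target pinned in %rax. */
static void call_nospec(void (*fn)(void))
{
    asm volatile("call my_thunk_rax"
                 : "+a" (fn)   /* in %rax; the callee may clobber it */
                 :
                 : "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11",
                   "memory", "cc");
}

int main(void)
{
    call_nospec(target);
    return 0;
}

The indirect branch predictor only ever sees the benign pause loop as a 'ret' target, which is the property JMP_NOSPEC and CALL_NOSPEC select via ALTERNATIVE when X86_FEATURE_RETPOLINE is set.
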
diff --git a/queue-4.4/x86-retpoline-checksum32-convert-assembler-indirect-jumps.patch b/queue-4.4/x86-retpoline-checksum32-convert-assembler-indirect-jumps.patch
new file mode 100644
index 0000000..63e4fc0
--- /dev/null
@@ -0,0 +1,67 @@
+From 5096732f6f695001fa2d6f1335a2680b37912c69 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 11 Jan 2018 21:46:32 +0000
+Subject: x86/retpoline/checksum32: Convert assembler indirect jumps
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 5096732f6f695001fa2d6f1335a2680b37912c69 upstream.
+
+Convert all indirect jumps in 32bit checksum assembler code to use
+non-speculative sequences when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-11-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/lib/checksum_32.S |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/lib/checksum_32.S
++++ b/arch/x86/lib/checksum_32.S
+@@ -28,7 +28,8 @@
+ #include <linux/linkage.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+-                              
++#include <asm/nospec-branch.h>
++
+ /*
+  * computes a partial checksum, e.g. for TCP/UDP fragments
+  */
+@@ -155,7 +156,7 @@ ENTRY(csum_partial)
+       negl %ebx
+       lea 45f(%ebx,%ebx,2), %ebx
+       testl %esi, %esi
+-      jmp *%ebx
++      JMP_NOSPEC %ebx
+       # Handle 2-byte-aligned regions
+ 20:   addw (%esi), %ax
+@@ -437,7 +438,7 @@ ENTRY(csum_partial_copy_generic)
+       andl $-32,%edx
+       lea 3f(%ebx,%ebx), %ebx
+       testl %esi, %esi 
+-      jmp *%ebx
++      JMP_NOSPEC %ebx
+ 1:    addl $64,%esi
+       addl $64,%edi 
+       SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
diff --git a/queue-4.4/x86-retpoline-crypto-convert-crypto-assembler-indirect-jumps.patch b/queue-4.4/x86-retpoline-crypto-convert-crypto-assembler-indirect-jumps.patch
new file mode 100644
index 0000000..9207dda
--- /dev/null
@@ -0,0 +1,125 @@
+From 9697fa39efd3fc3692f2949d4045f393ec58450b Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 11 Jan 2018 21:46:27 +0000
+Subject: x86/retpoline/crypto: Convert crypto assembler indirect jumps
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 9697fa39efd3fc3692f2949d4045f393ec58450b upstream.
+
+Convert all indirect jumps in crypto assembler code to use non-speculative
+sequences when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-6-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/crypto/aesni-intel_asm.S            |    5 +++--
+ arch/x86/crypto/camellia-aesni-avx-asm_64.S  |    3 ++-
+ arch/x86/crypto/camellia-aesni-avx2-asm_64.S |    3 ++-
+ arch/x86/crypto/crc32c-pcl-intel-asm_64.S    |    3 ++-
+ 4 files changed, 9 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -31,6 +31,7 @@
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/nospec-branch.h>
+ /*
+  * The following macros are used to move an (un)aligned 16 byte value to/from
+@@ -2714,7 +2715,7 @@ ENTRY(aesni_xts_crypt8)
+       pxor INC, STATE4
+       movdqu IV, 0x30(OUTP)
+-      call *%r11
++      CALL_NOSPEC %r11
+       movdqu 0x00(OUTP), INC
+       pxor INC, STATE1
+@@ -2759,7 +2760,7 @@ ENTRY(aesni_xts_crypt8)
+       _aesni_gf128mul_x_ble()
+       movups IV, (IVP)
+-      call *%r11
++      CALL_NOSPEC %r11
+       movdqu 0x40(OUTP), INC
+       pxor INC, STATE1
+--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+@@ -16,6 +16,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/nospec-branch.h>
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+@@ -1210,7 +1211,7 @@ camellia_xts_crypt_16way:
+       vpxor 14 * 16(%rax), %xmm15, %xmm14;
+       vpxor 15 * 16(%rax), %xmm15, %xmm15;
+-      call *%r9;
++      CALL_NOSPEC %r9;
+       addq $(16 * 16), %rsp;
+--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+@@ -11,6 +11,7 @@
+  */
+ #include <linux/linkage.h>
++#include <asm/nospec-branch.h>
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+@@ -1323,7 +1324,7 @@ camellia_xts_crypt_32way:
+       vpxor 14 * 32(%rax), %ymm15, %ymm14;
+       vpxor 15 * 32(%rax), %ymm15, %ymm15;
+-      call *%r9;
++      CALL_NOSPEC %r9;
+       addq $(16 * 32), %rsp;
+--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
++++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+@@ -45,6 +45,7 @@
+ #include <asm/inst.h>
+ #include <linux/linkage.h>
++#include <asm/nospec-branch.h>
+ ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
+@@ -172,7 +173,7 @@ continue_block:
+       movzxw  (bufp, %rax, 2), len
+       offset=crc_array-jump_table
+       lea     offset(bufp, len, 1), bufp
+-      jmp     *bufp
++      JMP_NOSPEC bufp
+       ################################################################
+       ## 2a) PROCESS FULL BLOCKS:
diff --git a/queue-4.4/x86-retpoline-entry-convert-entry-assembler-indirect-jumps.patch b/queue-4.4/x86-retpoline-entry-convert-entry-assembler-indirect-jumps.patch
new file mode 100644
index 0000000..a5b8908
--- /dev/null
@@ -0,0 +1,122 @@
+From 2641f08bb7fc63a636a2b18173221d7040a3512e Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 11 Jan 2018 21:46:28 +0000
+Subject: x86/retpoline/entry: Convert entry assembler indirect jumps
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 2641f08bb7fc63a636a2b18173221d7040a3512e upstream.
+
+Convert indirect jumps in core 32/64bit entry assembler code to use
+non-speculative sequences when CONFIG_RETPOLINE is enabled.
+
+Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
+address after the 'call' instruction must be *precisely* at the
+.Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work,
+and the use of alternatives will mess that up unless we play horrid
+games to prepend with NOPs and make the variants the same length. It's
+not worth it; in the case where we ALTERNATIVE out the retpoline, the
+first instruction at __x86.indirect_thunk.rax is going to be a bare
+jmp *%rax anyway.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-7-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/entry_32.S |    5 +++--
+ arch/x86/entry/entry_64.S |   14 +++++++++++++-
+ 2 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -44,6 +44,7 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
++#include <asm/nospec-branch.h>
+       .section .entry.text, "ax"
+@@ -226,7 +227,7 @@ ENTRY(ret_from_kernel_thread)
+       pushl   $0x0202                         # Reset kernel eflags
+       popfl
+       movl    PT_EBP(%esp), %eax
+-      call    *PT_EBX(%esp)
++      CALL_NOSPEC PT_EBX(%esp)
+       movl    $0, PT_EAX(%esp)
+       /*
+@@ -938,7 +939,7 @@ error_code:
+       movl    %ecx, %es
+       TRACE_IRQS_OFF
+       movl    %esp, %eax                      # pt_regs pointer
+-      call    *%edi
++      CALL_NOSPEC %edi
+       jmp     ret_from_exception
+ END(page_fault)
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -36,6 +36,7 @@
+ #include <asm/smap.h>
+ #include <asm/pgtable_types.h>
+ #include <asm/kaiser.h>
++#include <asm/nospec-branch.h>
+ #include <linux/err.h>
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
+@@ -184,7 +185,13 @@ entry_SYSCALL_64_fastpath:
+ #endif
+       ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
+       movq    %r10, %rcx
++#ifdef CONFIG_RETPOLINE
++      movq    sys_call_table(, %rax, 8), %rax
++      call    __x86_indirect_thunk_rax
++#else
+       call    *sys_call_table(, %rax, 8)
++#endif
++
+       movq    %rax, RAX(%rsp)
+ 1:
+ /*
+@@ -276,7 +283,12 @@ tracesys_phase2:
+ #endif
+       ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
+       movq    %r10, %rcx                      /* fixup for C */
++#ifdef CONFIG_RETPOLINE
++      movq    sys_call_table(, %rax, 8), %rax
++      call    __x86_indirect_thunk_rax
++#else
+       call    *sys_call_table(, %rax, 8)
++#endif
+       movq    %rax, RAX(%rsp)
+ 1:
+       /* Use IRET because user could have changed pt_regs->foo */
+@@ -491,7 +503,7 @@ ENTRY(ret_from_fork)
+        * nb: we depend on RESTORE_EXTRA_REGS above
+        */
+       movq    %rbp, %rdi
+-      call    *%rbx
++      CALL_NOSPEC %rbx
+       movl    $0, RAX(%rsp)
+       RESTORE_EXTRA_REGS
+       jmp     int_ret_from_sys_call
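
The open-coded fastpath sequence above (load the table entry into %rax, then a plain call to the rax thunk, so the return address stays at a fixed offset with no ALTERNATIVE padding involved) can be mimicked in userspace. A hedged sketch, assuming x86-64 Linux with GCC or Clang and -O2 -mno-red-zone; dispatch_thunk_rax, call_table and sys_demo are invented names:

#include <stdio.h>

/* same thunk shape as the patch's __x86_indirect_thunk_rax */
asm(".text\n"
    ".globl dispatch_thunk_rax\n"
    "dispatch_thunk_rax:\n"
    "   call 2f\n"
    "1: pause\n"
    "   jmp  1b\n"
    "2: mov  %rax, (%rsp)\n"
    "   ret\n");

typedef long (*syscall_fn)(void);
static long sys_demo(void) { return 42; }
static syscall_fn call_table[] = { sys_demo };

static long dispatch(unsigned long nr)
{
    long ret;

    asm volatile("mov (%[table],%[nr],8), %%rax\n\t" /* rax = table[nr] */
                 "call dispatch_thunk_rax"           /* retpolined call *%rax */
                 : "=a" (ret)
                 : [table] "r" (call_table), [nr] "r" (nr)
                 : "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11",
                   "memory", "cc");
    return ret;
}

int main(void)
{
    printf("sys returned %ld\n", dispatch(0));
    return 0;
}
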
diff --git a/queue-4.4/x86-retpoline-fill-return-stack-buffer-on-vmexit.patch b/queue-4.4/x86-retpoline-fill-return-stack-buffer-on-vmexit.patch
new file mode 100644
index 0000000..17c2f34
--- /dev/null
@@ -0,0 +1,188 @@
+From 117cc7a908c83697b0b737d15ae1eb5943afe35b Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Fri, 12 Jan 2018 11:11:27 +0000
+Subject: x86/retpoline: Fill return stack buffer on vmexit
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 117cc7a908c83697b0b737d15ae1eb5943afe35b upstream.
+
+In accordance with the Intel and AMD documentation, we need to overwrite
+all entries in the RSB on exiting a guest, to prevent malicious branch
+target predictions from affecting the host kernel. This is needed both
+for retpoline and for IBRS.
+
+[ak: numbers again for the RSB stuffing labels]
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515755487-8524-1-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/nospec-branch.h |   76 ++++++++++++++++++++++++++++++++++-
+ arch/x86/kvm/svm.c                   |    4 +
+ arch/x86/kvm/vmx.c                   |    4 +
+ 3 files changed, 83 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -7,6 +7,48 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/cpufeature.h>
++/*
++ * Fill the CPU return stack buffer.
++ *
++ * Each entry in the RSB, if used for a speculative 'ret', contains an
++ * infinite 'pause; jmp' loop to capture speculative execution.
++ *
++ * This is required in various cases for retpoline and IBRS-based
++ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
++ * eliminate potentially bogus entries from the RSB, and sometimes
++ * purely to ensure that it doesn't get empty, which on some CPUs would
++ * allow predictions from other (unwanted!) sources to be used.
++ *
++ * We define a CPP macro such that it can be used from both .S files and
++ * inline assembly. It's possible to do a .macro and then include that
++ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
++ */
++
++#define RSB_CLEAR_LOOPS               32      /* To forcibly overwrite all entries */
++#define RSB_FILL_LOOPS                16      /* To avoid underflow */
++
++/*
++ * Google experimented with loop-unrolling and this turned out to be
++ * the optimal version - two calls, each with their own speculation
++ * trap should their return address end up getting used, in a loop.
++ */
++#define __FILL_RETURN_BUFFER(reg, nr, sp)     \
++      mov     $(nr/2), reg;                   \
++771:                                          \
++      call    772f;                           \
++773:  /* speculation trap */                  \
++      pause;                                  \
++      jmp     773b;                           \
++772:                                          \
++      call    774f;                           \
++775:  /* speculation trap */                  \
++      pause;                                  \
++      jmp     775b;                           \
++774:                                          \
++      dec     reg;                            \
++      jnz     771b;                           \
++      add     $(BITS_PER_LONG/8) * nr, sp;
++
+ #ifdef __ASSEMBLY__
+ /*
+@@ -61,6 +103,19 @@
+ #endif
+ .endm
++ /*
++  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
++  * monstrosity above, manually.
++  */
++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
++#ifdef CONFIG_RETPOLINE
++      ALTERNATIVE "jmp .Lskip_rsb_\@",                                \
++              __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
++              \ftr
++.Lskip_rsb_\@:
++#endif
++.endm
++
+ #else /* __ASSEMBLY__ */
+ #if defined(CONFIG_X86_64) && defined(RETPOLINE)
+@@ -97,7 +152,7 @@
+       X86_FEATURE_RETPOLINE)
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+-#else /* No retpoline */
++#else /* No retpoline for C / inline asm */
+ # define CALL_NOSPEC "call *%[thunk_target]\n"
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+ #endif
+@@ -112,5 +167,24 @@ enum spectre_v2_mitigation {
+       SPECTRE_V2_IBRS,
+ };
++/*
++ * On VMEXIT we must ensure that no RSB predictions learned in the guest
++ * can be followed in the host, by overwriting the RSB completely. Both
++ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
++ * CPUs with IBRS_ATT *might* it be avoided.
++ */
++static inline void vmexit_fill_RSB(void)
++{
++#ifdef CONFIG_RETPOLINE
++      unsigned long loops = RSB_CLEAR_LOOPS / 2;
++
++      asm volatile (ALTERNATIVE("jmp 910f",
++                                __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
++                                X86_FEATURE_RETPOLINE)
++                    "910:"
++                    : "=&r" (loops), ASM_CALL_CONSTRAINT
++                    : "r" (loops) : "memory" );
++#endif
++}
+ #endif /* __ASSEMBLY__ */
+ #endif /* __NOSPEC_BRANCH_H__ */
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -37,6 +37,7 @@
+ #include <asm/desc.h>
+ #include <asm/debugreg.h>
+ #include <asm/kvm_para.h>
++#include <asm/nospec-branch.h>
+ #include <asm/virtext.h>
+ #include "trace.h"
+@@ -3904,6 +3905,9 @@ static void svm_vcpu_run(struct kvm_vcpu
+ #endif
+               );
++      /* Eliminate branch target predictions from guest mode */
++      vmexit_fill_RSB();
++
+ #ifdef CONFIG_X86_64
+       wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+ #else
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -47,6 +47,7 @@
+ #include <asm/kexec.h>
+ #include <asm/apic.h>
+ #include <asm/irq_remapping.h>
++#include <asm/nospec-branch.h>
+ #include "trace.h"
+ #include "pmu.h"
+@@ -8701,6 +8702,9 @@ static void __noclone vmx_vcpu_run(struc
+ #endif
+             );
++      /* Eliminate branch target predictions from guest mode */
++      vmexit_fill_RSB();
++
+       /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
+       if (debugctlmsr)
+               update_debugctlmsr(debugctlmsr);
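
The __FILL_RETURN_BUFFER loop introduced here also works as plain userspace code. A minimal sketch, assuming x86-64 Linux with GCC or Clang (-O2 -mno-red-zone, since the loop temporarily pushes 256 bytes below the stack pointer); fill_rsb is an invented name:

#include <stdio.h>

/* Each 'call' pushes a return address whose only purpose is to occupy
 * an RSB entry; the pause/jmp pairs trap any speculative 'ret' that
 * consumes one; the final 'add' drops all 32 pushed addresses at once. */
static void fill_rsb(void)
{
    unsigned long loops = 16;          /* RSB_CLEAR_LOOPS / 2 */

    asm volatile("1:\n\t"
                 "call 2f\n\t"         /* first call of the pair */
                 "3: pause\n\t"
                 "   jmp 3b\n\t"       /* speculation trap */
                 "2:\n\t"
                 "call 4f\n\t"         /* second call of the pair */
                 "5: pause\n\t"
                 "   jmp 5b\n\t"       /* speculation trap */
                 "4:\n\t"
                 "dec %0\n\t"
                 "jnz 1b\n\t"
                 "add $256, %%rsp\n\t" /* 32 calls x 8 bytes each */
                 : "+r" (loops) : : "memory", "cc");
}

int main(void)
{
    fill_rsb();
    puts("RSB overwritten with 32 benign entries");
    return 0;
}

As the patch's comment notes, two calls per iteration with separate traps was the unrolling Google found optimal; the vmexit_fill_RSB() wrapper simply runs this under ALTERNATIVE so it costs a single jump when retpoline is disabled.
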
diff --git a/queue-4.4/x86-retpoline-ftrace-convert-ftrace-assembler-indirect-jumps.patch b/queue-4.4/x86-retpoline-ftrace-convert-ftrace-assembler-indirect-jumps.patch
new file mode 100644
index 0000000..f35a729
--- /dev/null
@@ -0,0 +1,89 @@
+From 9351803bd803cdbeb9b5a7850b7b6f464806e3db Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 11 Jan 2018 21:46:29 +0000
+Subject: x86/retpoline/ftrace: Convert ftrace assembler indirect jumps
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 9351803bd803cdbeb9b5a7850b7b6f464806e3db upstream.
+
+Convert all indirect jumps in ftrace assembler code to use non-speculative
+sequences when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-8-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/entry_32.S   |    5 +++--
+ arch/x86/kernel/mcount_64.S |    7 ++++---
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -862,7 +862,8 @@ trace:
+       movl    0x4(%ebp), %edx
+       subl    $MCOUNT_INSN_SIZE, %eax
+-      call    *ftrace_trace_function
++      movl    ftrace_trace_function, %ecx
++      CALL_NOSPEC %ecx
+       popl    %edx
+       popl    %ecx
+@@ -897,7 +898,7 @@ return_to_handler:
+       movl    %eax, %ecx
+       popl    %edx
+       popl    %eax
+-      jmp     *%ecx
++      JMP_NOSPEC %ecx
+ #endif
+ #ifdef CONFIG_TRACING
+--- a/arch/x86/kernel/mcount_64.S
++++ b/arch/x86/kernel/mcount_64.S
+@@ -7,7 +7,7 @@
+ #include <linux/linkage.h>
+ #include <asm/ptrace.h>
+ #include <asm/ftrace.h>
+-
++#include <asm/nospec-branch.h>
+       .code64
+       .section .entry.text, "ax"
+@@ -285,8 +285,9 @@ trace:
+        * ip and parent ip are used and the list function is called when
+        * function tracing is enabled.
+        */
+-      call   *ftrace_trace_function
++      movq ftrace_trace_function, %r8
++      CALL_NOSPEC %r8
+       restore_mcount_regs
+       jmp fgraph_trace
+@@ -329,5 +330,5 @@ GLOBAL(return_to_handler)
+       movq 8(%rsp), %rdx
+       movq (%rsp), %rax
+       addq $24, %rsp
+-      jmp *%rdi
++      JMP_NOSPEC %rdi
+ #endif
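
One detail worth noting in this patch: the original sites were memory-indirect ('call *ftrace_trace_function'), and a retpoline thunk needs its target in a register, so the conversion first loads the pointer and only then goes through CALL_NOSPEC. A small userspace sketch of the same two-step, assuming x86-64 Linux with GCC or Clang (-O2 -mno-red-zone); trace_thunk_rax, trace_fn and tracer are invented names:

#include <stdio.h>

/* same thunk shape as the patch's __x86_indirect_thunk_rax */
asm(".text\n"
    ".globl trace_thunk_rax\n"
    "trace_thunk_rax:\n"
    "   call 2f\n"
    "1: pause\n"
    "   jmp  1b\n"
    "2: mov  %rax, (%rsp)\n"
    "   ret\n");

static void tracer(void) { puts("trace hook called"); }

/* a mutable function-pointer variable, like ftrace_trace_function */
static void (*trace_fn)(void) = tracer;

int main(void)
{
    /* before: 'call *trace_fn' reads the pointer straight from memory;
     * after: load it into %rax first, then call through the thunk */
    void (*fn)(void) = trace_fn;       /* movq trace_fn(%rip), %rax */

    asm volatile("call trace_thunk_rax"
                 : "+a" (fn)
                 : : "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11",
                     "memory", "cc");
    return 0;
}
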
diff --git a/queue-4.4/x86-retpoline-hyperv-convert-assembler-indirect-jumps.patch b/queue-4.4/x86-retpoline-hyperv-convert-assembler-indirect-jumps.patch
new file mode 100644
index 0000000..07e14e2
--- /dev/null
@@ -0,0 +1,75 @@
+From e70e5892b28c18f517f29ab6e83bd57705104b31 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 11 Jan 2018 21:46:30 +0000
+Subject: x86/retpoline/hyperv: Convert assembler indirect jumps
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit e70e5892b28c18f517f29ab6e83bd57705104b31 upstream.
+
+Convert all indirect jumps in hyperv inline asm code to use non-speculative
+sequences when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-9-git-send-email-dwmw@amazon.co.uk
+[ backport to 4.4, hopefully correct, not tested... - gregkh ]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/hv.c |   11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -31,6 +31,7 @@
+ #include <linux/clockchips.h>
+ #include <asm/hyperv.h>
+ #include <asm/mshyperv.h>
++#include <asm/nospec-branch.h>
+ #include "hyperv_vmbus.h"
+ /* The one and only */
+@@ -103,9 +104,10 @@ static u64 do_hypercall(u64 control, voi
+               return (u64)ULLONG_MAX;
+       __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
+-      __asm__ __volatile__("call *%3" : "=a" (hv_status) :
++      __asm__ __volatile__(CALL_NOSPEC :
++                           "=a" (hv_status) :
+                            "c" (control), "d" (input_address),
+-                           "m" (hypercall_page));
++                           THUNK_TARGET(hypercall_page));
+       return hv_status;
+@@ -123,11 +125,12 @@ static u64 do_hypercall(u64 control, voi
+       if (!hypercall_page)
+               return (u64)ULLONG_MAX;
+-      __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
++      __asm__ __volatile__ (CALL_NOSPEC : "=d"(hv_status_hi),
+                             "=a"(hv_status_lo) : "d" (control_hi),
+                             "a" (control_lo), "b" (input_address_hi),
+                             "c" (input_address_lo), "D"(output_address_hi),
+-                            "S"(output_address_lo), "m" (hypercall_page));
++                            "S"(output_address_lo),
++                            THUNK_TARGET(hypercall_page));
+       return hv_status_lo | ((u64)hv_status_hi << 32);
+ #endif /* !x86_64 */
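
The C-side pattern used here (CALL_NOSPEC plus THUNK_TARGET) leans on GCC's %V operand modifier, which prints the bare register name of an operand so the asm becomes a direct call to the matching per-register thunk. A self-contained userspace sketch, assuming x86-64 Linux with GCC or Clang (-O2 -mno-red-zone); the target is pinned to %rax so one thunk suffices, and demo_thunk_rax and fake_hypercall are invented names:

#include <stdio.h>

asm(".text\n"
    ".globl demo_thunk_rax\n"
    "demo_thunk_rax:\n"
    "   call 2f\n"
    "1: pause\n"
    "   jmp  1b\n"
    "2: mov  %rax, (%rsp)\n"
    "   ret\n");

/* %V[thunk_target] expands to 'rax' here, so CALL_NOSPEC becomes
 * 'call demo_thunk_rax'; the kernel uses constraint "r" and provides
 * a thunk for every register instead. */
#define CALL_NOSPEC "call demo_thunk_%V[thunk_target]\n"
#define THUNK_TARGET(addr) [thunk_target] "a" (addr)

static unsigned long fake_hypercall(unsigned long control)
{
    return control | 0x100;            /* pretend status bits */
}

int main(void)
{
    unsigned long status, control = 7;

    asm volatile(CALL_NOSPEC
                 : "=a" (status), "+D" (control) /* arg in %rdi, ABI-style */
                 : THUNK_TARGET(fake_hypercall)
                 : "rcx", "rdx", "rsi", "r8", "r9", "r10", "r11",
                   "memory", "cc");
    printf("status 0x%lx\n", status);
    return 0;
}
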
diff --git a/queue-4.4/x86-retpoline-irq32-convert-assembler-indirect-jumps.patch b/queue-4.4/x86-retpoline-irq32-convert-assembler-indirect-jumps.patch
new file mode 100644
index 0000000..b2fa798
--- /dev/null
@@ -0,0 +1,76 @@
+From 7614e913db1f40fff819b36216484dc3808995d4 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Thu, 11 Jan 2018 21:46:33 +0000
+Subject: x86/retpoline/irq32: Convert assembler indirect jumps
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 7614e913db1f40fff819b36216484dc3808995d4 upstream.
+
+Convert all indirect jumps in 32bit irq inline asm code to use
+non-speculative sequences.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-12-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Razvan Ghitulete <rga@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/irq_32.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -20,6 +20,7 @@
+ #include <linux/mm.h>
+ #include <asm/apic.h>
++#include <asm/nospec-branch.h>
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softi
+ static void call_on_stack(void *func, void *stack)
+ {
+       asm volatile("xchgl     %%ebx,%%esp     \n"
+-                   "call      *%%edi          \n"
++                   CALL_NOSPEC
+                    "movl      %%ebx,%%esp     \n"
+                    : "=b" (stack)
+                    : "0" (stack),
+-                     "D"(func)
++                     [thunk_target] "D"(func)
+                    : "memory", "cc", "edx", "ecx", "eax");
+ }
+@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(i
+               call_on_stack(print_stack_overflow, isp);
+       asm volatile("xchgl     %%ebx,%%esp     \n"
+-                   "call      *%%edi          \n"
++                   CALL_NOSPEC
+                    "movl      %%ebx,%%esp     \n"
+                    : "=a" (arg1), "=b" (isp)
+                    :  "0" (desc),   "1" (isp),
+-                      "D" (desc->handle_irq)
++                      [thunk_target] "D" (desc->handle_irq)
+                    : "memory", "cc", "ecx");
+       return 1;
+ }
diff --git a/queue-4.4/x86-retpoline-remove-compile-time-warning.patch b/queue-4.4/x86-retpoline-remove-compile-time-warning.patch
new file mode 100644
index 0000000..faa5731
--- /dev/null
@@ -0,0 +1,60 @@
+From b8b9ce4b5aec8de9e23cabb0a26b78641f9ab1d6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 14 Jan 2018 22:13:29 +0100
+Subject: x86/retpoline: Remove compile time warning
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit b8b9ce4b5aec8de9e23cabb0a26b78641f9ab1d6 upstream.
+
+Remove the compile time warning when CONFIG_RETPOLINE=y and the compiler
+does not have retpoline support. Linus' rationale for this is:
+
+  It's wrong because it will just make people turn off RETPOLINE, and the
+  asm updates - and return stack clearing - that are independent of the
+  compiler are likely the most important parts because they are likely the
+  ones easiest to target.
+
+  And it's annoying because most people won't be able to do anything about
+  it. The number of people building their own compiler? Very small. So if
+  their distro hasn't got a compiler yet (and pretty much nobody does), the
+  warning is just annoying crap.
+
+  It is already properly reported as part of the sysfs interface. The
+  compile-time warning only encourages bad things.
+
+Fixes: 76b043848fd2 ("x86/retpoline: Add initial retpoline support")
+Requested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Link: https://lkml.kernel.org/r/CA+55aFzWgquv4i6Mab6bASqYXg3ErV3XDFEYf=GEcCDQg5uAtw@mail.gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/Makefile |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -194,8 +194,6 @@ ifdef CONFIG_RETPOLINE
+     RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+     ifneq ($(RETPOLINE_CFLAGS),)
+         KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+-    else
+-        $(warning CONFIG_RETPOLINE=y, but not supported by the compiler. Toolchain update recommended.)
+     endif
+ endif
diff --git a/queue-4.4/x86-retpoline-xen-convert-xen-hypercall-indirect-jumps.patch b/queue-4.4/x86-retpoline-xen-convert-xen-hypercall-indirect-jumps.patch
new file mode 100644
index 0000000..0715340
--- /dev/null
@@ -0,0 +1,61 @@
+From ea08816d5b185ab3d09e95e393f265af54560350 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 11 Jan 2018 21:46:31 +0000
+Subject: x86/retpoline/xen: Convert Xen hypercall indirect jumps
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit ea08816d5b185ab3d09e95e393f265af54560350 upstream.
+
+Convert indirect call in Xen hypercall to use non-speculative sequence
+when CONFIG_RETPOLINE is enabled.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-10-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/xen/hypercall.h |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -44,6 +44,7 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/smap.h>
++#include <asm/nospec-branch.h>
+ #include <xen/interface/xen.h>
+ #include <xen/interface/sched.h>
+@@ -215,9 +216,9 @@ privcmd_call(unsigned call,
+       __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+       stac();
+-      asm volatile("call *%[call]"
++      asm volatile(CALL_NOSPEC
+                    : __HYPERCALL_5PARAM
+-                   : [call] "a" (&hypercall_page[call])
++                   : [thunk_target] "a" (&hypercall_page[call])
+                    : __HYPERCALL_CLOBBER5);
+       clac();
diff --git a/queue-4.4/x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch b/queue-4.4/x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch
new file mode 100644
index 0000000..ff05624
--- /dev/null
@@ -0,0 +1,317 @@
+From da285121560e769cc31797bba6422eea71d473e0 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 11 Jan 2018 21:46:26 +0000
+Subject: x86/spectre: Add boot time option to select Spectre v2 mitigation
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit da285121560e769cc31797bba6422eea71d473e0 upstream.
+
+Add a spectre_v2= option to select the mitigation used for the indirect
+branch speculation vulnerability.
+
+Currently, the only option available is retpoline, in its various forms.
+This will be expanded to cover the new IBRS/IBPB microcode features.
+
+The RETPOLINE_AMD feature relies on a serializing LFENCE for speculation
+control. For AMD hardware, only set RETPOLINE_AMD if LFENCE is a
+serializing instruction, which is indicated by the LFENCE_RDTSC feature.
+
+[ tglx: Folded back the LFENCE/AMD fixes and reworked it so IBRS
+       integration becomes simple ]
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515707194-20531-5-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/kernel-parameters.txt  |   28 ++++++
+ arch/x86/include/asm/nospec-branch.h |   10 ++
+ arch/x86/kernel/cpu/bugs.c           |  158 ++++++++++++++++++++++++++++++++++-
+ arch/x86/kernel/cpu/common.c         |    4 
+ 4 files changed, 195 insertions(+), 5 deletions(-)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2452,6 +2452,11 @@ bytes respectively. Such letter suffixes
+       nohugeiomap     [KNL,x86] Disable kernel huge I/O mappings.
++      nospectre_v2    [X86] Disable all mitigations for the Spectre variant 2
++                      (indirect branch prediction) vulnerability. System may
++                      allow data leaks with this option, which is equivalent
++                      to spectre_v2=off.
++
+       noxsave         [BUGS=X86] Disables x86 extended register state save
+                       and restore using xsave. The kernel will fallback to
+                       enabling legacy floating-point and sse state.
+@@ -3594,6 +3599,29 @@ bytes respectively. Such letter suffixes
+       sonypi.*=       [HW] Sony Programmable I/O Control Device driver
+                       See Documentation/laptops/sonypi.txt
++      spectre_v2=     [X86] Control mitigation of Spectre variant 2
++                      (indirect branch speculation) vulnerability.
++
++                      on   - unconditionally enable
++                      off  - unconditionally disable
++                      auto - kernel detects whether your CPU model is
++                             vulnerable
++
++                      Selecting 'on' will, and 'auto' may, choose a
++                      mitigation method at run time according to the
++                      CPU, the available microcode, the setting of the
++                      CONFIG_RETPOLINE configuration option, and the
++                      compiler with which the kernel was built.
++
++                      Specific mitigations can also be selected manually:
++
++                      retpoline         - replace indirect branches
++                      retpoline,generic - google's original retpoline
++                      retpoline,amd     - AMD-specific minimal thunk
++
++                      Not specifying this option is equivalent to
++                      spectre_v2=auto.
++
+       spia_io_base=   [HW,MTD]
+       spia_fio_base=
+       spia_pedr=
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -102,5 +102,15 @@
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+ #endif
++/* The Spectre V2 mitigation variants */
++enum spectre_v2_mitigation {
++      SPECTRE_V2_NONE,
++      SPECTRE_V2_RETPOLINE_MINIMAL,
++      SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
++      SPECTRE_V2_RETPOLINE_GENERIC,
++      SPECTRE_V2_RETPOLINE_AMD,
++      SPECTRE_V2_IBRS,
++};
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __NOSPEC_BRANCH_H__ */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -10,6 +10,9 @@
+ #include <linux/init.h>
+ #include <linux/utsname.h>
+ #include <linux/cpu.h>
++
++#include <asm/nospec-branch.h>
++#include <asm/cmdline.h>
+ #include <asm/bugs.h>
+ #include <asm/processor.h>
+ #include <asm/processor-flags.h>
+@@ -20,6 +23,8 @@
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
++static void __init spectre_v2_select_mitigation(void);
++
+ void __init check_bugs(void)
+ {
+       identify_boot_cpu();
+@@ -29,6 +34,9 @@ void __init check_bugs(void)
+               print_cpu_info(&boot_cpu_data);
+       }
++      /* Select the proper spectre mitigation before patching alternatives */
++      spectre_v2_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+       /*
+        * Check whether we are able to run this kernel safely on SMP.
+@@ -61,6 +69,153 @@ void __init check_bugs(void)
+ #endif
+ }
++/* The kernel command line selection */
++enum spectre_v2_mitigation_cmd {
++      SPECTRE_V2_CMD_NONE,
++      SPECTRE_V2_CMD_AUTO,
++      SPECTRE_V2_CMD_FORCE,
++      SPECTRE_V2_CMD_RETPOLINE,
++      SPECTRE_V2_CMD_RETPOLINE_GENERIC,
++      SPECTRE_V2_CMD_RETPOLINE_AMD,
++};
++
++static const char *spectre_v2_strings[] = {
++      [SPECTRE_V2_NONE]                       = "Vulnerable",
++      [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
++      [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
++      [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
++      [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
++};
++
++#undef pr_fmt
++#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt
++
++static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
++
++static void __init spec2_print_if_insecure(const char *reason)
++{
++      if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++              pr_info("%s\n", reason);
++}
++
++static void __init spec2_print_if_secure(const char *reason)
++{
++      if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++              pr_info("%s\n", reason);
++}
++
++static inline bool retp_compiler(void)
++{
++      return __is_defined(RETPOLINE);
++}
++
++static inline bool match_option(const char *arg, int arglen, const char *opt)
++{
++      int len = strlen(opt);
++
++      return len == arglen && !strncmp(arg, opt, len);
++}
++
++static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
++{
++      char arg[20];
++      int ret;
++
++      ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
++                                sizeof(arg));
++      if (ret > 0)  {
++              if (match_option(arg, ret, "off")) {
++                      goto disable;
++              } else if (match_option(arg, ret, "on")) {
++                      spec2_print_if_secure("force enabled on command line.");
++                      return SPECTRE_V2_CMD_FORCE;
++              } else if (match_option(arg, ret, "retpoline")) {
++                      spec2_print_if_insecure("retpoline selected on command line.");
++                      return SPECTRE_V2_CMD_RETPOLINE;
++              } else if (match_option(arg, ret, "retpoline,amd")) {
++                      if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
++                              pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
++                              return SPECTRE_V2_CMD_AUTO;
++                      }
++                      spec2_print_if_insecure("AMD retpoline selected on command line.");
++                      return SPECTRE_V2_CMD_RETPOLINE_AMD;
++              } else if (match_option(arg, ret, "retpoline,generic")) {
++                      spec2_print_if_insecure("generic retpoline selected on command line.");
++                      return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
++              } else if (match_option(arg, ret, "auto")) {
++                      return SPECTRE_V2_CMD_AUTO;
++              }
++      }
++
++      if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
++              return SPECTRE_V2_CMD_AUTO;
++disable:
++      spec2_print_if_insecure("disabled on command line.");
++      return SPECTRE_V2_CMD_NONE;
++}
++
++static void __init spectre_v2_select_mitigation(void)
++{
++      enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
++      enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
++
++      /*
++       * If the CPU is not affected and the command line mode is NONE or AUTO
++       * then nothing to do.
++       */
++      if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
++          (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
++              return;
++
++      switch (cmd) {
++      case SPECTRE_V2_CMD_NONE:
++              return;
++
++      case SPECTRE_V2_CMD_FORCE:
+              /* FALLTHRU */
++      case SPECTRE_V2_CMD_AUTO:
++              goto retpoline_auto;
++
++      case SPECTRE_V2_CMD_RETPOLINE_AMD:
++              if (IS_ENABLED(CONFIG_RETPOLINE))
++                      goto retpoline_amd;
++              break;
++      case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
++              if (IS_ENABLED(CONFIG_RETPOLINE))
++                      goto retpoline_generic;
++              break;
++      case SPECTRE_V2_CMD_RETPOLINE:
++              if (IS_ENABLED(CONFIG_RETPOLINE))
++                      goto retpoline_auto;
++              break;
++      }
++      pr_err("kernel not compiled with retpoline; no mitigation available!");
++      return;
++
++retpoline_auto:
++      if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
++      retpoline_amd:
++              if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
++                      pr_err("LFENCE not serializing. Switching to generic retpoline\n");
++                      goto retpoline_generic;
++              }
++              mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
++                                       SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
++              setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
++              setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
++      } else {
++      retpoline_generic:
++              mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
++                                       SPECTRE_V2_RETPOLINE_MINIMAL;
++              setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
++      }
++
++      spectre_v2_enabled = mode;
++      pr_info("%s\n", spectre_v2_strings[mode]);
++}
++
++#undef pr_fmt
++
+ #ifdef CONFIG_SYSFS
+ ssize_t cpu_show_meltdown(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+@@ -85,6 +240,7 @@ ssize_t cpu_show_spectre_v2(struct devic
+ {
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+               return sprintf(buf, "Not affected\n");
+-      return sprintf(buf, "Vulnerable\n");
++
++      return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
+ }
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -837,10 +837,6 @@ static void __init early_identify_cpu(st
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+-#ifdef CONFIG_RETPOLINE
+-      setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+-#endif
+-
+       fpu__init_system(c);
+ #ifdef CONFIG_X86_32
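
For the command-line plumbing this patch adds, the parsing step can be sketched in ordinary C. The snippet below mirrors the patch's match_option() (length-delimited compare, since cmdline_find_option() does not NUL-terminate the value) but substitutes a plain strstr() scan for the kernel's cmdline_find_option(), so it is a simplification, not the kernel's logic:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* mirror of the patch's match_option(): 'arg' is arglen bytes, not
 * NUL-terminated, exactly as cmdline_find_option() hands it over */
static bool match_option(const char *arg, int arglen, const char *opt)
{
    int len = strlen(opt);

    return len == arglen && !strncmp(arg, opt, len);
}

int main(void)
{
    const char *cmdline = "root=/dev/sda1 spectre_v2=retpoline,amd quiet";
    const char *p = strstr(cmdline, "spectre_v2=");   /* simplified lookup */

    if (!p) {
        puts("SPECTRE_V2_CMD_AUTO (option absent)");
        return 0;
    }
    p += strlen("spectre_v2=");
    int len = strcspn(p, " ");         /* the value ends at the next space */

    if (match_option(p, len, "off"))
        puts("SPECTRE_V2_CMD_NONE");
    else if (match_option(p, len, "on"))
        puts("SPECTRE_V2_CMD_FORCE");
    else if (match_option(p, len, "retpoline"))
        puts("SPECTRE_V2_CMD_RETPOLINE");
    else if (match_option(p, len, "retpoline,amd"))
        puts("SPECTRE_V2_CMD_RETPOLINE_AMD");
    else if (match_option(p, len, "retpoline,generic"))
        puts("SPECTRE_V2_CMD_RETPOLINE_GENERIC");
    else
        puts("SPECTRE_V2_CMD_AUTO");
    return 0;
}

With the sample cmdline this prints SPECTRE_V2_CMD_RETPOLINE_AMD; in the kernel, spectre_v2_select_mitigation() then still falls back to the generic retpoline if LFENCE is not serialising.
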