--- /dev/null
+Subject: KVM: VMX: Make indirect call speculation safe
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu Jan 25 10:58:14 2018 +0100
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit c940a3fb1e2e9b7d03228ab28f375fb5a47ff699
+
+Replace indirect call with CALL_NOSPEC.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Jun Nakajima <jun.nakajima@intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: rga@amazon.de
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Link: https://lkml.kernel.org/r/20180125095843.645776917@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9118,14 +9118,14 @@ static void vmx_handle_external_intr(str
+ #endif
+ "pushf\n\t"
+ __ASM_SIZE(push) " $%c[cs]\n\t"
+- "call *%[entry]\n\t"
++ CALL_NOSPEC
+ :
+ #ifdef CONFIG_X86_64
+ [sp]"=&r"(tmp),
+ #endif
+ ASM_CALL_CONSTRAINT
+ :
+- [entry]"r"(entry),
++ THUNK_TARGET(entry),
+ [ss]"i"(__KERNEL_DS),
+ [cs]"i"(__KERNEL_CS)
+ );
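For reference, the CALL_NOSPEC and THUNK_TARGET macros used here come from asm/nospec-branch.h, added earlier in this series. Their shape at this point is roughly the following (a from-memory sketch, not a verbatim quote; the AMD lfence variant and objtool annotations are elided):

    #define CALL_NOSPEC                                                 \
            ANNOTATE_NOSPEC_ALTERNATIVE                                 \
            ALTERNATIVE("call *%[thunk_target]\n",                      \
                        "call __x86_indirect_thunk_%V[thunk_target]\n", \
                        X86_FEATURE_RETPOLINE)

    #define THUNK_TARGET(addr) [thunk_target] "r" (addr)

So on CPUs without X86_FEATURE_RETPOLINE the patched code still executes the plain indirect call, and alternatives patching routes it through the per-register retpoline thunk when the feature bit is set.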
--- /dev/null
+Subject: KVM: x86: Make indirect calls in emulator speculation safe
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu Jan 25 10:58:13 2018 +0100
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 1a29b5b7f347a1a9230c1e0af5b37e3e571588ab
+
+Replace the indirect calls with CALL_NOSPEC.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Jun Nakajima <jun.nakajima@intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: rga@amazon.de
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Link: https://lkml.kernel.org/r/20180125095843.595615683@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/kvm/emulate.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -25,6 +25,7 @@
+ #include <asm/kvm_emulate.h>
+ #include <linux/stringify.h>
+ #include <asm/debugreg.h>
++#include <asm/nospec-branch.h>
+
+ #include "x86.h"
+ #include "tss.h"
+@@ -1021,8 +1022,8 @@ static __always_inline u8 test_cc(unsign
+ void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
+
+ flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
+- asm("push %[flags]; popf; call *%[fastop]"
+- : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
++ asm("push %[flags]; popf; " CALL_NOSPEC
++ : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
+ return rc;
+ }
+
+@@ -5350,9 +5351,9 @@ static int fastop(struct x86_emulate_ctx
+ if (!(ctxt->d & ByteOp))
+ fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
+
+- asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
++ asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
+ : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
+- [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
++ [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
+ : "c"(ctxt->src2.val));
+
+ ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
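To make the thunk itself concrete, here is a minimal self-contained userspace sketch of the retpoline sequence (assumptions: x86-64, GCC or Clang, System V ABI, compiled with -mno-red-zone because the inline call clobbers the red zone; retpoline_thunk_rax, call_via_thunk and add_one are names made up for illustration, this is not kernel code):

    #include <stdio.h>

    /* Dispatch to the address in %rax without an indirect branch. */
    asm(".text\n"
        "retpoline_thunk_rax:\n"
        "    call 1f\n"            /* push the trap address below */
        "0:  pause\n"              /* a speculative 'ret' lands here... */
        "    lfence\n"
        "    jmp 0b\n"             /* ...and spins harmlessly */
        "1:  mov %rax, (%rsp)\n"   /* retarget the pushed return slot */
        "    ret\n");              /* architecturally jumps to %rax */

    static int add_one(int x) { return x + 1; }

    static int call_via_thunk(int (*fn)(int), int arg)
    {
            int ret;
            register int arg_edi asm("edi") = arg;

            asm volatile("call retpoline_thunk_rax"
                         : "=a" (ret)
                         : "a" (fn), "r" (arg_edi)
                         : "rcx", "rdx", "rsi", "r8", "r9", "r10", "r11",
                           "memory", "cc");
            return ret;
    }

    int main(void)
    {
            printf("%d\n", call_via_thunk(add_one, 41)); /* prints 42 */
            return 0;
    }

The return stack predictor expects the thunk's ret to go back to the pushed trap address, so speculation spins in the pause/lfence loop; architecturally the overwritten return slot sends execution to the real target.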
--- /dev/null
+Subject: module/retpoline: Warn about missing retpoline in module
+From: Andi Kleen <ak@linux.intel.com>
+Date: Thu Jan 25 15:50:28 2018 -0800
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit caf7501a1b4ec964190f31f9c3f163de252273b8
+
+There's a risk that a kernel which has full retpoline mitigations becomes
+vulnerable when a module gets loaded that hasn't been compiled with the
+right compiler or the right option.
+
+To enable detection of that mismatch at module load time, add a module info
+string "retpoline" at build time when the module was compiled with
+retpoline support. This only covers compiled C source; assembler source
+and prebuilt object files are not checked.
+
+If a retpoline enabled kernel detects a non retpoline protected module at
+load time, print a warning and report it in the sysfs vulnerability file.
+
+[ tglx: Massaged changelog ]
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: gregkh@linuxfoundation.org
+Cc: torvalds@linux-foundation.org
+Cc: jeyu@kernel.org
+Cc: arjan@linux.intel.com
+Link: https://lkml.kernel.org/r/20180125235028.31211-1-andi@firstfloor.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c | 17 ++++++++++++++++-
+ include/linux/module.h | 9 +++++++++
+ kernel/module.c | 11 +++++++++++
+ scripts/mod/modpost.c | 9 +++++++++
+ 4 files changed, 45 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -11,6 +11,7 @@
+ #include <linux/init.h>
+ #include <linux/utsname.h>
+ #include <linux/cpu.h>
++#include <linux/module.h>
+
+ #include <asm/nospec-branch.h>
+ #include <asm/cmdline.h>
+@@ -93,6 +94,19 @@ static const char *spectre_v2_strings[]
+ #define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
+
+ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
++static bool spectre_v2_bad_module;
++
++#ifdef RETPOLINE
++bool retpoline_module_ok(bool has_retpoline)
++{
++ if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
++ return true;
++
++ pr_err("System may be vunerable to spectre v2\n");
++ spectre_v2_bad_module = true;
++ return false;
++}
++#endif
+
+ static void __init spec2_print_if_insecure(const char *reason)
+ {
+@@ -278,6 +292,7 @@ ssize_t cpu_show_spectre_v2(struct devic
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return sprintf(buf, "Not affected\n");
+
+- return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
++ return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ spectre_v2_bad_module ? " - vulnerable module loaded" : "");
+ }
+ #endif
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -794,6 +794,15 @@ static inline void module_bug_finalize(c
+ static inline void module_bug_cleanup(struct module *mod) {}
+ #endif /* CONFIG_GENERIC_BUG */
+
++#ifdef RETPOLINE
++extern bool retpoline_module_ok(bool has_retpoline);
++#else
++static inline bool retpoline_module_ok(bool has_retpoline)
++{
++ return true;
++}
++#endif
++
+ #ifdef CONFIG_MODULE_SIG
+ static inline bool module_sig_ok(struct module *module)
+ {
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2855,6 +2855,15 @@ static int check_modinfo_livepatch(struc
+ }
+ #endif /* CONFIG_LIVEPATCH */
+
++static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
++{
++ if (retpoline_module_ok(get_modinfo(info, "retpoline")))
++ return;
++
++ pr_warn("%s: loading module not compiled with retpoline compiler.\n",
++ mod->name);
++}
++
+ /* Sets info->hdr and info->len. */
+ static int copy_module_from_user(const void __user *umod, unsigned long len,
+ struct load_info *info)
+@@ -3021,6 +3030,8 @@ static int check_modinfo(struct module *
+ add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
+ }
+
++ check_modinfo_retpoline(mod, info);
++
+ if (get_modinfo(info, "staging")) {
+ add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
+ pr_warn("%s: module is from the staging directory, the quality "
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -2165,6 +2165,14 @@ static void add_intree_flag(struct buffe
+ buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
+ }
+
++/* Cannot check for assembler */
++static void add_retpoline(struct buffer *b)
++{
++ buf_printf(b, "\n#ifdef RETPOLINE\n");
++ buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
++ buf_printf(b, "#endif\n");
++}
++
+ static void add_staging_flag(struct buffer *b, const char *name)
+ {
+ static const char *staging_dir = "drivers/staging";
+@@ -2506,6 +2514,7 @@ int main(int argc, char **argv)
+ err |= check_modname_len(mod);
+ add_header(&buf, mod);
+ add_intree_flag(&buf, !external_module);
++ add_retpoline(&buf);
+ add_staging_flag(&buf, mod->name);
+ err |= add_versions(&buf, mod);
+ add_depends(&buf, mod, modules);
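For reference, the net effect of add_retpoline() is one extra fragment in each module's generated *.mod.c, guarded so the tag is only defined when the kernel build itself had RETPOLINE set (sketch for a hypothetical module foo):

    /* excerpt of the generated foo.mod.c */
    #ifdef RETPOLINE
    MODULE_INFO(retpoline, "Y");
    #endif

The tag can then be inspected from userspace, e.g. "modinfo -F retpoline foo.ko" prints Y for a retpoline-built module and nothing otherwise.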
 scripts-faddr2line-fix-cross_compile-unset-error.patch
 powerpc-64s-wire-up-cpu_show_meltdown.patch
 powerpc-64s-allow-control-of-rfi-flush-via-debugfs.patch
+x86retpoline_Remove_the_esprsp_thunk.patch
+KVM_x86_Make_indirect_calls_in_emulator_speculation_safe.patch
+KVM_VMX_Make_indirect_call_speculation_safe.patch
+moduleretpoline_Warn_about_missing_retpoline_in_module.patch
+x86cpufeatures_Add_CPUID_7_EDX_CPUID_leaf.patch
+x86cpufeatures_Add_Intel_feature_bits_for_Speculation_Control.patch
+x86cpufeatures_Add_AMD_feature_bits_for_Speculation_Control.patch
+x86msr_Add_definitions_for_new_speculation_control_MSRs.patch
+x86pti_Do_not_enable_PTI_on_CPUs_which_are_not_vulnerable_to_Meltdown.patch
+x86cpufeature_Blacklist_SPEC_CTRLPRED_CMD_on_early_Spectre_v2_microcodes.patch
+x86speculation_Add_basic_IBPB_(Indirect_Branch_Prediction_Barrier)_support.patch
+x86alternative_Print_unadorned_pointers.patch
+x86nospec_Fix_header_guards_names.patch
+x86bugs_Drop_one_mitigation_from_dmesg.patch
+x86cpubugs_Make_retpoline_module_warning_conditional.patch
+x86cpufeatures_Clean_up_Spectre_v2_related_CPUID_flags.patch
+x86retpoline_Simplify_vmexit_fill_RSB().patch
+x86speculation_Simplify_indirect_branch_prediction_barrier().patch
--- /dev/null
+Subject: x86/alternative: Print unadorned pointers
+From: Borislav Petkov <bp@suse.de>
+Date: Fri Jan 26 13:11:36 2018 +0100
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 0e6c16c652cadaffd25a6bb326ec10da5bcec6b4
+
+After commit ad67b74d2469 ("printk: hash addresses printed with %p")
+pointers are being hashed when printed. However, this makes the alternative
+debug output completely useless. Switch to %px in order to see the
+unadorned kernel pointers.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: riel@redhat.com
+Cc: ak@linux.intel.com
+Cc: peterz@infradead.org
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: jikos@kernel.org
+Cc: luto@amacapital.net
+Cc: dave.hansen@intel.com
+Cc: torvalds@linux-foundation.org
+Cc: keescook@google.com
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Cc: pjt@google.com
+Link: https://lkml.kernel.org/r/20180126121139.31959-2-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/kernel/alternative.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -298,7 +298,7 @@ recompute_jump(struct alt_instr *a, u8 *
+ tgt_rip = next_rip + o_dspl;
+ n_dspl = tgt_rip - orig_insn;
+
+- DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
++ DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
+
+ if (tgt_rip - orig_insn >= 0) {
+ if (n_dspl - 2 <= 127)
+@@ -355,7 +355,7 @@ static void __init_or_module noinline op
+ add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+ local_irq_restore(flags);
+
+- DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
++ DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
+ instr, a->instrlen - a->padlen, a->padlen);
+ }
+
+@@ -376,7 +376,7 @@ void __init_or_module noinline apply_alt
+ u8 *instr, *replacement;
+ u8 insnbuf[MAX_PATCH_LEN];
+
+- DPRINTK("alt table %p -> %p", start, end);
++ DPRINTK("alt table %px, -> %px", start, end);
+ /*
+ * The scan order should be from start to end. A later scanned
+ * alternative code can overwrite previously scanned alternative code.
+@@ -400,14 +400,14 @@ void __init_or_module noinline apply_alt
+ continue;
+ }
+
+- DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
++ DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
+ a->cpuid >> 5,
+ a->cpuid & 0x1f,
+ instr, a->instrlen,
+ replacement, a->replacementlen, a->padlen);
+
+- DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
+- DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
++ DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
++ DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
+
+ memcpy(insnbuf, replacement, a->replacementlen);
+ insnbuf_sz = a->replacementlen;
+@@ -433,7 +433,7 @@ void __init_or_module noinline apply_alt
+ a->instrlen - a->replacementlen);
+ insnbuf_sz += a->instrlen - a->replacementlen;
+ }
+- DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
++ DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);
+
+ text_poke_early(instr, insnbuf, insnbuf_sz);
+ }
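To illustrate the difference (output values are examples only; %p hashes change every boot):

    pr_info("insn at %p\n", instr);  /* "insn at 00000000d2cbe8e9" (hashed) */
    pr_info("insn at %px\n", instr); /* "insn at ffffffff81026e60" (raw)    */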
--- /dev/null
+Subject: x86/bugs: Drop one "mitigation" from dmesg
+From: Borislav Petkov <bp@suse.de>
+Date: Fri Jan 26 13:11:39 2018 +0100
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 55fa19d3e51f33d9cd4056d25836d93abf9438db
+
+Make
+
+[ 0.031118] Spectre V2 mitigation: Mitigation: Full generic retpoline
+
+into
+
+[ 0.031118] Spectre V2: Mitigation: Full generic retpoline
+
+to avoid the duplicated "mitigation" strings.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: riel@redhat.com
+Cc: ak@linux.intel.com
+Cc: peterz@infradead.org
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: jikos@kernel.org
+Cc: luto@amacapital.net
+Cc: dave.hansen@intel.com
+Cc: torvalds@linux-foundation.org
+Cc: keescook@google.com
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: tim.c.chen@linux.intel.com
+Cc: pjt@google.com
+Link: https://lkml.kernel.org/r/20180126121139.31959-5-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -91,7 +91,7 @@ static const char *spectre_v2_strings[]
+ };
+
+ #undef pr_fmt
+-#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
++#define pr_fmt(fmt) "Spectre V2 : " fmt
+
+ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+ static bool spectre_v2_bad_module;
--- /dev/null
+Subject: x86/cpu/bugs: Make retpoline module warning conditional
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat Jan 27 15:45:14 2018 +0100
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit e383095c7fe8d218e00ec0f83e4b95ed4e627b02
+
+If sysfs is disabled and RETPOLINE not defined:
+
+arch/x86/kernel/cpu/bugs.c:97:13: warning: ‘spectre_v2_bad_module’ defined but not used
+[-Wunused-variable]
+ static bool spectre_v2_bad_module;
+
+Hide it.
+
+Fixes: caf7501a1b4e ("module/retpoline: Warn about missing retpoline in module")
+Reported-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -94,9 +94,10 @@ static const char *spectre_v2_strings[]
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+
+ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+-static bool spectre_v2_bad_module;
+
+ #ifdef RETPOLINE
++static bool spectre_v2_bad_module;
++
+ bool retpoline_module_ok(bool has_retpoline)
+ {
+ if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
+@@ -106,6 +107,13 @@ bool retpoline_module_ok(bool has_retpol
+ spectre_v2_bad_module = true;
+ return false;
+ }
++
++static inline const char *spectre_v2_module_string(void)
++{
++ return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
++}
++#else
++static inline const char *spectre_v2_module_string(void) { return ""; }
+ #endif
+
+ static void __init spec2_print_if_insecure(const char *reason)
+@@ -300,7 +308,7 @@ ssize_t cpu_show_spectre_v2(struct devic
+ return sprintf(buf, "Not affected\n");
+
+ return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+- boot_cpu_has(X86_FEATURE_IBPB) ? ", IPBP" : "",
+- spectre_v2_bad_module ? " - vulnerable module loaded" : "");
++ boot_cpu_has(X86_FEATURE_IBPB) ? ", IBPB" : "",
++ spectre_v2_module_string());
+ }
+ #endif
--- /dev/null
+Subject: x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu Jan 25 16:14:14 2018 +0000
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit a5b2966364538a0e68c9fa29bc0a3a1651799035
+
+This doesn't refuse to load the affected microcodes; it just refuses to
+use the Spectre v2 mitigation features if they're detected, by clearing
+the appropriate feature bits.
+
+The AMD CPUID bits are handled here too, because hypervisors *may* have
+been exposing those bits even on Intel chips, for fine-grained control
+of what's available.
+
+It is non-trivial to use x86_match_cpu() for this table because that
+doesn't handle steppings. And the approach taken in commit bd9240a18
+almost made me lose my lunch.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: ak@linux.intel.com
+Cc: ashok.raj@intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1516896855-7642-7-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/kernel/cpu/intel.c | 66 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 66 insertions(+)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -102,6 +102,59 @@ static void probe_xeon_phi_r3mwait(struc
+ ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
+ }
+
++/*
++ * Early microcode releases for the Spectre v2 mitigation were broken.
++ * Information taken from;
++ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
++ * - https://kb.vmware.com/s/article/52345
++ * - Microcode revisions observed in the wild
++ * - Release note from 20180108 microcode release
++ */
++struct sku_microcode {
++ u8 model;
++ u8 stepping;
++ u32 microcode;
++};
++static const struct sku_microcode spectre_bad_microcodes[] = {
++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
++ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
++ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
++ { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
++ { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
++ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
++ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
++ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
++ { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 },
++ { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
++ { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
++ { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
++ { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
++ { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
++ { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
++ { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
++ /* Updated in the 20180108 release; blacklist until we know otherwise */
++ { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
++ /* Observed in the wild */
++ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
++ { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
++};
++
++static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
++ if (c->x86_model == spectre_bad_microcodes[i].model &&
++ c->x86_mask == spectre_bad_microcodes[i].stepping)
++ return (c->microcode <= spectre_bad_microcodes[i].microcode);
++ }
++ return false;
++}
++
+ static void early_init_intel(struct cpuinfo_x86 *c)
+ {
+ u64 misc_enable;
+@@ -122,6 +175,19 @@ static void early_init_intel(struct cpui
+ if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+ c->microcode = intel_get_microcode_revision();
+
++ if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
++ cpu_has(c, X86_FEATURE_STIBP) ||
++ cpu_has(c, X86_FEATURE_AMD_SPEC_CTRL) ||
++ cpu_has(c, X86_FEATURE_AMD_PRED_CMD) ||
++ cpu_has(c, X86_FEATURE_AMD_STIBP)) && bad_spectre_microcode(c)) {
++ pr_warn("Intel Spectre v2 broken microcode detected; disabling SPEC_CTRL\n");
++ clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
++ clear_cpu_cap(c, X86_FEATURE_STIBP);
++ clear_cpu_cap(c, X86_FEATURE_AMD_SPEC_CTRL);
++ clear_cpu_cap(c, X86_FEATURE_AMD_PRED_CMD);
++ clear_cpu_cap(c, X86_FEATURE_AMD_STIBP);
++ }
++
+ /*
+ * Atom erratum AAE44/AAF40/AAG38/AAH41:
+ *
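Note the <= comparison: an entry blacklists the listed microcode revision and everything older. A standalone sketch of the same check, using one entry from the table above (Haswell-X is family 6 model 0x3f; the model, stepping and microcode values correspond to the fields /proc/cpuinfo reports):

    #include <stdbool.h>
    #include <stdio.h>

    struct sku { unsigned model, stepping, microcode; };

    static const struct sku bad[] = {
            { 0x3f /* INTEL_FAM6_HASWELL_X */, 0x02, 0x3b },
    };

    static bool bad_spectre_microcode(unsigned model, unsigned stepping,
                                      unsigned rev)
    {
            for (unsigned i = 0; i < sizeof(bad) / sizeof(bad[0]); i++)
                    if (model == bad[i].model && stepping == bad[i].stepping)
                            return rev <= bad[i].microcode;
            return false;
    }

    int main(void)
    {
            printf("%d\n", bad_spectre_microcode(0x3f, 0x02, 0x3b)); /* 1 */
            printf("%d\n", bad_spectre_microcode(0x3f, 0x02, 0x3c)); /* 0 */
            return 0;
    }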
--- /dev/null
+Subject: x86/cpufeatures: Add AMD feature bits for Speculation Control
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu Jan 25 16:14:11 2018 +0000
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 5d10cbc91d9eb5537998b65608441b592eec65e7
+
+AMD exposes the PRED_CMD/SPEC_CTRL MSRs slightly differently to Intel.
+See http://lkml.kernel.org/r/2b3e25cc-286d-8bd0-aeaf-9ac4aae39de8@amd.com
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: ak@linux.intel.com
+Cc: ashok.raj@intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1516896855-7642-4-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/cpufeatures.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -269,6 +269,9 @@
+ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
+ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
+ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
++#define X86_FEATURE_AMD_PRED_CMD (13*32+12) /* Prediction Command MSR (AMD) */
++#define X86_FEATURE_AMD_SPEC_CTRL (13*32+14) /* Speculation Control MSR only (AMD) */
++#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors (AMD) */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
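CPU feature word 13 holds CPUID leaf 0x80000008 EBX, so the new bits sit at EBX bits 12, 14 and 15 and can be probed from userspace (a sketch, assuming a toolchain whose <cpuid.h> provides __get_cpuid_count):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid_count(0x80000008, 0, &eax, &ebx, &ecx, &edx))
                    return 1;
            printf("AMD_PRED_CMD  (bit 12): %u\n", (ebx >> 12) & 1);
            printf("AMD_SPEC_CTRL (bit 14): %u\n", (ebx >> 14) & 1);
            printf("AMD_STIBP     (bit 15): %u\n", (ebx >> 15) & 1);
            return 0;
    }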
--- /dev/null
+Subject: x86/cpufeatures: Add CPUID_7_EDX CPUID leaf
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu Jan 25 16:14:09 2018 +0000
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 95ca0ee8636059ea2800dfbac9ecac6212d6b38f
+
+This is a pure feature bits leaf. There are two AVX512 feature bits in it
+already which were handled as scattered bits, and three more from this leaf
+are going to be added for speculation control features.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: ak@linux.intel.com
+Cc: ashok.raj@intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1516896855-7642-2-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/cpufeature.h | 7 +++++--
+ arch/x86/include/asm/cpufeatures.h | 8 +++++---
+ arch/x86/include/asm/disabled-features.h | 3 ++-
+ arch/x86/include/asm/required-features.h | 3 ++-
+ arch/x86/kernel/cpu/common.c | 1 +
+ arch/x86/kernel/cpu/scattered.c | 2 --
+ 6 files changed, 15 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -29,6 +29,7 @@ enum cpuid_leafs
+ CPUID_8000_000A_EDX,
+ CPUID_7_ECX,
+ CPUID_8000_0007_EBX,
++ CPUID_7_EDX,
+ };
+
+ #ifdef CONFIG_X86_FEATURE_NAMES
+@@ -79,8 +80,9 @@ extern const char * const x86_bug_flags[
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
++ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
+ REQUIRED_MASK_CHECK || \
+- BUILD_BUG_ON_ZERO(NCAPINTS != 18))
++ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
+
+ #define DISABLED_MASK_BIT_SET(feature_bit) \
+ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
+@@ -101,8 +103,9 @@ extern const char * const x86_bug_flags[
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
++ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
+ DISABLED_MASK_CHECK || \
+- BUILD_BUG_ON_ZERO(NCAPINTS != 18))
++ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
+
+ #define cpu_has(c, bit) \
+ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -13,7 +13,7 @@
+ /*
+ * Defines x86 CPU feature bits
+ */
+-#define NCAPINTS 18 /* N 32-bit words worth of info */
++#define NCAPINTS 19 /* N 32-bit words worth of info */
+ #define NBUGINTS 1 /* N 32-bit bug flags */
+
+ /*
+@@ -206,8 +206,6 @@
+ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+-#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
+-#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+
+ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
+@@ -319,6 +317,10 @@
+ #define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
+ #define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
+
++/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
++#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
++#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++
+ /*
+ * BUG word(s)
+ */
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -71,6 +71,7 @@
+ #define DISABLED_MASK15 0
+ #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
+ #define DISABLED_MASK17 0
+-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
++#define DISABLED_MASK18 0
++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+
+ #endif /* _ASM_X86_DISABLED_FEATURES_H */
+--- a/arch/x86/include/asm/required-features.h
++++ b/arch/x86/include/asm/required-features.h
+@@ -106,6 +106,7 @@
+ #define REQUIRED_MASK15 0
+ #define REQUIRED_MASK16 (NEED_LA57)
+ #define REQUIRED_MASK17 0
+-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
++#define REQUIRED_MASK18 0
++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+
+ #endif /* _ASM_X86_REQUIRED_FEATURES_H */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -745,6 +745,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+ cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
+ c->x86_capability[CPUID_7_0_EBX] = ebx;
+ c->x86_capability[CPUID_7_ECX] = ecx;
++ c->x86_capability[CPUID_7_EDX] = edx;
+ }
+
+ /* Extended state features: level 0x0000000d */
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -21,8 +21,6 @@ struct cpuid_bit {
+ static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
+ { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
+- { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
+- { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
+ { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
+ { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
+ { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
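The X86_FEATURE_* encoding used throughout these headers is word*32 + bit: the word part indexes c->x86_capability[], the bit part selects within that 32-bit word. A sketch of the mapping for the bits moved here:

    #define FEATURE_WORD(f) ((f) / 32)  /* index into x86_capability[] */
    #define FEATURE_BIT(f)  ((f) % 32)  /* bit within that word        */

    /* X86_FEATURE_AVX512_4VNNIW = 18*32 + 2: word 18 is CPUID_7_EDX,
     * filled from CPUID(7,0).EDX in get_cpu_cap() above, so cpu_has()
     * ends up testing bit 2 of that cached register value. */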
--- /dev/null
+Subject: x86/cpufeatures: Add Intel feature bits for Speculation Control
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu Jan 25 16:14:10 2018 +0000
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit fc67dd70adb711a45d2ef34e12d1a8be75edde61
+
+Add three feature bits exposed by new microcode on Intel CPUs for
+speculation control.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: ak@linux.intel.com
+Cc: ashok.raj@intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1516896855-7642-3-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/cpufeatures.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -320,6 +320,9 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_SPEC_CTRL (18*32+26) /* Speculation Control (IBRS + IBPB) */
++#define X86_FEATURE_STIBP (18*32+27) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+
+ /*
+ * BUG word(s)
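As these live in word 18, i.e. CPUID(7,0).EDX, the same userspace probe pattern as for the AMD leaf above applies; after __get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) succeeds (sketch):

    int spec_ctrl = (edx >> 26) & 1;  /* X86_FEATURE_SPEC_CTRL         */
    int stibp     = (edx >> 27) & 1;  /* X86_FEATURE_STIBP             */
    int arch_cap  = (edx >> 29) & 1;  /* X86_FEATURE_ARCH_CAPABILITIES */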
--- /dev/null
+Subject: x86/cpufeatures: Clean up Spectre v2 related CPUID flags
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sat Jan 27 16:24:32 2018 +0000
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 2961298efe1ea1b6fc0d7ee8b76018fa6c0bcef2
+
+We want to expose the hardware features simply in /proc/cpuinfo as "ibrs",
+"ibpb" and "stibp". Since AMD has separate CPUID bits for those, use them
+as the user-visible bits.
+
+When the Intel SPEC_CTRL bit is set which indicates both IBRS and IBPB
+capability, set those (AMD) bits accordingly. Likewise if the Intel STIBP
+bit is set, set the AMD STIBP that's used for the generic hardware
+capability.
+
+Hide the rest from /proc/cpuinfo by putting "" in the comments, including
+RETPOLINE and RETPOLINE_AMD, which shouldn't be visible there. There are
+patches to make the sysfs vulnerabilities information non-readable by
+non-root, and the same should apply to all information about which
+mitigations are actually in use. Those *shouldn't* appear in /proc/cpuinfo.
+
+The feature bit for whether IBPB is actually used, which is needed for
+ALTERNATIVEs, is renamed to X86_FEATURE_USE_IBPB.
+
+Originally-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: ak@linux.intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1517070274-12128-2-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/cpufeatures.h | 18 +++++++++---------
+ arch/x86/include/asm/nospec-branch.h | 2 +-
+ arch/x86/kernel/cpu/bugs.c | 7 +++----
+ arch/x86/kernel/cpu/intel.c | 31 +++++++++++++++++++++----------
+ 4 files changed, 34 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -203,14 +203,14 @@
+ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
+-#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
+-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+
+ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+-#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+-#define X86_FEATURE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled*/
++#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -271,9 +271,9 @@
+ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
+ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
+ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+-#define X86_FEATURE_AMD_PRED_CMD (13*32+12) /* Prediction Command MSR (AMD) */
+-#define X86_FEATURE_AMD_SPEC_CTRL (13*32+14) /* Speculation Control MSR only (AMD) */
+-#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors (AMD) */
++#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
++#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+@@ -325,8 +325,8 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+-#define X86_FEATURE_SPEC_CTRL (18*32+26) /* Speculation Control (IBRS + IBPB) */
+-#define X86_FEATURE_STIBP (18*32+27) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
++#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+
+ /*
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -225,7 +225,7 @@ static inline void indirect_branch_predi
+ "movl %[val], %%eax\n\t"
+ "movl $0, %%edx\n\t"
+ "wrmsr",
+- X86_FEATURE_IBPB)
++ X86_FEATURE_USE_IBPB)
+ : : [msr] "i" (MSR_IA32_PRED_CMD),
+ [val] "i" (PRED_CMD_IBPB)
+ : "eax", "ecx", "edx", "memory");
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -273,9 +273,8 @@ retpoline_auto:
+ }
+
+ /* Initialize Indirect Branch Prediction Barrier if supported */
+- if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+- boot_cpu_has(X86_FEATURE_AMD_PRED_CMD)) {
+- setup_force_cpu_cap(X86_FEATURE_IBPB);
++ if (boot_cpu_has(X86_FEATURE_IBPB)) {
++ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ pr_info("Enabling Indirect Branch Prediction Barrier\n");
+ }
+ }
+@@ -308,7 +307,7 @@ ssize_t cpu_show_spectre_v2(struct devic
+ return sprintf(buf, "Not affected\n");
+
+ return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+- boot_cpu_has(X86_FEATURE_IBPB) ? ", IBPB" : "",
++ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ spectre_v2_module_string());
+ }
+ #endif
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -175,17 +175,28 @@ static void early_init_intel(struct cpui
+ if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+ c->microcode = intel_get_microcode_revision();
+
+- if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
+- cpu_has(c, X86_FEATURE_STIBP) ||
+- cpu_has(c, X86_FEATURE_AMD_SPEC_CTRL) ||
+- cpu_has(c, X86_FEATURE_AMD_PRED_CMD) ||
+- cpu_has(c, X86_FEATURE_AMD_STIBP)) && bad_spectre_microcode(c)) {
+- pr_warn("Intel Spectre v2 broken microcode detected; disabling SPEC_CTRL\n");
+- clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
++ /*
++ * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
++ * and they also have a different bit for STIBP support. Also,
++ * a hypervisor might have set the individual AMD bits even on
++ * Intel CPUs, for finer-grained selection of what's available.
++ */
++ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
++ set_cpu_cap(c, X86_FEATURE_IBRS);
++ set_cpu_cap(c, X86_FEATURE_IBPB);
++ }
++ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
++ set_cpu_cap(c, X86_FEATURE_STIBP);
++
++ /* Now if any of them are set, check the blacklist and clear the lot */
++ if ((cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
++ cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
++ pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
++ clear_cpu_cap(c, X86_FEATURE_IBRS);
++ clear_cpu_cap(c, X86_FEATURE_IBPB);
+ clear_cpu_cap(c, X86_FEATURE_STIBP);
+- clear_cpu_cap(c, X86_FEATURE_AMD_SPEC_CTRL);
+- clear_cpu_cap(c, X86_FEATURE_AMD_PRED_CMD);
+- clear_cpu_cap(c, X86_FEATURE_AMD_STIBP);
++ clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
++ clear_cpu_cap(c, X86_FEATURE_INTEL_STIBP);
+ }
+
+ /*
--- /dev/null
+Subject: x86/msr: Add definitions for new speculation control MSRs
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu Jan 25 16:14:12 2018 +0000
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 1e340c60d0dd3ae07b5bedc16a0469c14b9f3410
+
+Add MSR and bit definitions for SPEC_CTRL, PRED_CMD and ARCH_CAPABILITIES.
+
+See Intel's 336996-Speculative-Execution-Side-Channel-Mitigations.pdf
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: ak@linux.intel.com
+Cc: ashok.raj@intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1516896855-7642-5-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/msr-index.h | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -39,6 +39,13 @@
+
+ /* Intel MSRs. Some also available on other CPUs */
+
++#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
++#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
++#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
++
++#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
++#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
++
+ #define MSR_PPIN_CTL 0x0000004e
+ #define MSR_PPIN 0x0000004f
+
+@@ -57,6 +64,11 @@
+ #define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
+
+ #define MSR_MTRRcap 0x000000fe
++
++#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
++#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
++#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
++
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+ #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+
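A usage sketch of the new definitions (kernel context; rdmsrl()/wrmsrl() are the standard MSR helpers, and both accesses must be gated on the corresponding CPUID bits since touching an unimplemented MSR faults):

    /* Issue an indirect branch prediction barrier: */
    wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);

    /* Query the architectural capabilities word where enumerated: */
    u64 ia32_cap;

    rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
    if (ia32_cap & ARCH_CAP_RDCL_NO)
            pr_info("CPU reports immunity to Meltdown\n");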
--- /dev/null
+Subject: x86/nospec: Fix header guards names
+From: Borislav Petkov <bp@suse.de>
+Date: Fri Jan 26 13:11:37 2018 +0100
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 7a32fc51ca938e67974cbb9db31e1a43f98345a9
+
+... to adhere to the _ASM_X86_ naming scheme.
+
+No functional change.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: riel@redhat.com
+Cc: ak@linux.intel.com
+Cc: peterz@infradead.org
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: jikos@kernel.org
+Cc: luto@amacapital.net
+Cc: dave.hansen@intel.com
+Cc: torvalds@linux-foundation.org
+Cc: keescook@google.com
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Cc: pjt@google.com
+Link: https://lkml.kernel.org/r/20180126121139.31959-3-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/nospec-branch.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
+-#ifndef __NOSPEC_BRANCH_H__
+-#define __NOSPEC_BRANCH_H__
++#ifndef _ASM_X86_NOSPEC_BRANCH_H_
++#define _ASM_X86_NOSPEC_BRANCH_H_
+
+ #include <asm/alternative.h>
+ #include <asm/alternative-asm.h>
+@@ -232,4 +232,4 @@ static inline void indirect_branch_predi
+ }
+
+ #endif /* __ASSEMBLY__ */
+-#endif /* __NOSPEC_BRANCH_H__ */
++#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
--- /dev/null
+Subject: x86/pti: Do not enable PTI on CPUs which are not vulnerable to Meltdown
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu Jan 25 16:14:13 2018 +0000
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit fec9434a12f38d3aeafeb75711b71d8a1fdef621
+
+Also, for CPUs which don't speculate at all, don't report that they're
+vulnerable to the Spectre variants either.
+
+Leave the cpu_no_meltdown[] match table with just X86_VENDOR_AMD in it
+for now, even though that could be done with a simple comparison, on the
+assumption that we'll have more to add.
+
+Based on suggestions from Dave Hansen and Alan Cox.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: ak@linux.intel.com
+Cc: ashok.raj@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1516896855-7642-6-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/kernel/cpu/common.c | 48 ++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 43 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -47,6 +47,8 @@
+ #include <asm/pat.h>
+ #include <asm/microcode.h>
+ #include <asm/microcode_intel.h>
++#include <asm/intel-family.h>
++#include <asm/cpu_device_id.h>
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ #include <asm/uv/uv.h>
+@@ -853,6 +855,41 @@ static void identify_cpu_without_cpuid(s
+ #endif
+ }
+
++static const __initdata struct x86_cpu_id cpu_no_speculation[] = {
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
++ { X86_VENDOR_CENTAUR, 5 },
++ { X86_VENDOR_INTEL, 5 },
++ { X86_VENDOR_NSC, 5 },
++ { X86_VENDOR_ANY, 4 },
++ {}
++};
++
++static const __initdata struct x86_cpu_id cpu_no_meltdown[] = {
++ { X86_VENDOR_AMD },
++ {}
++};
++
++static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
++{
++ u64 ia32_cap = 0;
++
++ if (x86_match_cpu(cpu_no_meltdown))
++ return false;
++
++ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++
++ /* Rogue Data Cache Load? No! */
++ if (ia32_cap & ARCH_CAP_RDCL_NO)
++ return false;
++
++ return true;
++}
++
+ /*
+ * Do minimum CPU detection early.
+ * Fields really needed: vendor, cpuid_level, family, model, mask,
+@@ -900,11 +937,12 @@ static void __init early_identify_cpu(st
+
+ setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+
+- if (c->x86_vendor != X86_VENDOR_AMD)
+- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+-
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++ if (!x86_match_cpu(cpu_no_speculation)) {
++ if (cpu_vulnerable_to_meltdown(c))
++ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++ }
+
+ fpu__init_system(c);
+
--- /dev/null
+Subject: x86/retpoline: Remove the esp/rsp thunk
+From: Waiman Long <longman@redhat.com>
+Date: Mon Jan 22 17:09:34 2018 -0500
+
+From: Waiman Long <longman@redhat.com>
+
+commit 1df37383a8aeabb9b418698f0bcdffea01f4b1b2
+
+It doesn't make sense to have an indirect call thunk with esp/rsp as
+retpoline code won't work correctly with the stack pointer register.
+Removing it will help compiler writers to catch errors in case such
+a thunk call is emitted incorrectly.
+
+Fixes: 76b043848fd2 ("x86/retpoline: Add initial retpoline support")
+Suggested-by: Jeff Law <law@redhat.com>
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1516658974-27852-1-git-send-email-longman@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/asm-prototypes.h | 1 -
+ arch/x86/lib/retpoline.S | 1 -
+ 2 files changed, 2 deletions(-)
+
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -38,5 +38,4 @@ INDIRECT_THUNK(dx)
+ INDIRECT_THUNK(si)
+ INDIRECT_THUNK(di)
+ INDIRECT_THUNK(bp)
+-INDIRECT_THUNK(sp)
+ #endif /* CONFIG_RETPOLINE */
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -36,7 +36,6 @@ GENERATE_THUNK(_ASM_DX)
+ GENERATE_THUNK(_ASM_SI)
+ GENERATE_THUNK(_ASM_DI)
+ GENERATE_THUNK(_ASM_BP)
+-GENERATE_THUNK(_ASM_SP)
+ #ifdef CONFIG_64BIT
+ GENERATE_THUNK(r8)
+ GENERATE_THUNK(r9)
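To see why, instantiate the thunk body with the stack pointer (a sketch of what GENERATE_THUNK(_ASM_SP) used to emit):

    /* __x86_indirect_thunk_rsp would have been:
     *
     *      call    1f
     * 2:   pause
     *      lfence
     *      jmp     2b
     * 1:   mov     %rsp, (%rsp)
     *      ret
     *
     * The mov overwrites the return slot with the stack pointer itself,
     * so the ret "returns" into stack memory: wrong by construction for
     * every possible call site.
     */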
--- /dev/null
+Subject: x86/retpoline: Simplify vmexit_fill_RSB()
+From: Borislav Petkov <bp@alien8.de>
+Date: Sat Jan 27 16:24:33 2018 +0000
+
+From: Borislav Petkov <bp@alien8.de>
+
+commit 1dde7415e99933bb7293d6b2843752cbdb43ec11
+
+Simplify it to call an asm-function instead of pasting 41 insn bytes at
+every call site. Also, add alignment to the macro as suggested here:
+
+ https://support.google.com/faqs/answer/7625886
+
+[dwmw2: Clean up comments, let it clobber %ebx and just tell the compiler]
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: ak@linux.intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1517070274-12128-3-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/entry/entry_32.S | 3 -
+ arch/x86/entry/entry_64.S | 3 -
+ arch/x86/include/asm/asm-prototypes.h | 3 +
+ arch/x86/include/asm/nospec-branch.h | 70 +++-------------------------------
+ arch/x86/lib/Makefile | 1
+ arch/x86/lib/retpoline.S | 56 +++++++++++++++++++++++++++
+ 6 files changed, 71 insertions(+), 65 deletions(-)
+
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -252,7 +252,8 @@ ENTRY(__switch_to_asm)
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+- FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++ /* Clobbers %ebx */
++ FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ #endif
+
+ /* restore callee-saved registers */
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -495,7 +495,8 @@ ENTRY(__switch_to_asm)
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+- FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++ /* Clobbers %rbx */
++ FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ #endif
+
+ /* restore callee-saved registers */
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -38,4 +38,7 @@ INDIRECT_THUNK(dx)
+ INDIRECT_THUNK(si)
+ INDIRECT_THUNK(di)
+ INDIRECT_THUNK(bp)
++asmlinkage void __fill_rsb(void);
++asmlinkage void __clear_rsb(void);
++
+ #endif /* CONFIG_RETPOLINE */
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -7,50 +7,6 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/cpufeatures.h>
+
+-/*
+- * Fill the CPU return stack buffer.
+- *
+- * Each entry in the RSB, if used for a speculative 'ret', contains an
+- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+- *
+- * This is required in various cases for retpoline and IBRS-based
+- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+- * eliminate potentially bogus entries from the RSB, and sometimes
+- * purely to ensure that it doesn't get empty, which on some CPUs would
+- * allow predictions from other (unwanted!) sources to be used.
+- *
+- * We define a CPP macro such that it can be used from both .S files and
+- * inline assembly. It's possible to do a .macro and then include that
+- * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+- */
+-
+-#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
+-#define RSB_FILL_LOOPS 16 /* To avoid underflow */
+-
+-/*
+- * Google experimented with loop-unrolling and this turned out to be
+- * the optimal version — two calls, each with their own speculation
+- * trap should their return address end up getting used, in a loop.
+- */
+-#define __FILL_RETURN_BUFFER(reg, nr, sp) \
+- mov $(nr/2), reg; \
+-771: \
+- call 772f; \
+-773: /* speculation trap */ \
+- pause; \
+- lfence; \
+- jmp 773b; \
+-772: \
+- call 774f; \
+-775: /* speculation trap */ \
+- pause; \
+- lfence; \
+- jmp 775b; \
+-774: \
+- dec reg; \
+- jnz 771b; \
+- add $(BITS_PER_LONG/8) * nr, sp;
+-
+ #ifdef __ASSEMBLY__
+
+ /*
+@@ -121,17 +77,10 @@
+ #endif
+ .endm
+
+- /*
+- * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+- * monstrosity above, manually.
+- */
+-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
++/* This clobbers the BX register */
++.macro FILL_RETURN_BUFFER nr:req ftr:req
+ #ifdef CONFIG_RETPOLINE
+- ANNOTATE_NOSPEC_ALTERNATIVE
+- ALTERNATIVE "jmp .Lskip_rsb_\@", \
+- __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
+- \ftr
+-.Lskip_rsb_\@:
++ ALTERNATIVE "", "call __clear_rsb", \ftr
+ #endif
+ .endm
+
+@@ -206,15 +155,10 @@ extern char __indirect_thunk_end[];
+ static inline void vmexit_fill_RSB(void)
+ {
+ #ifdef CONFIG_RETPOLINE
+- unsigned long loops;
+-
+- asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+- ALTERNATIVE("jmp 910f",
+- __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+- X86_FEATURE_RETPOLINE)
+- "910:"
+- : "=r" (loops), ASM_CALL_CONSTRAINT
+- : : "memory" );
++ alternative_input("",
++ "call __fill_rsb",
++ X86_FEATURE_RETPOLINE,
++ ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
+ #endif
+ }
+
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -27,6 +27,7 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) +=
+ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
+ lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+ lib-$(CONFIG_RETPOLINE) += retpoline.o
++OBJECT_FILES_NON_STANDARD_retpoline.o :=y
+
+ obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -7,6 +7,7 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/export.h>
+ #include <asm/nospec-branch.h>
++#include <asm/bitsperlong.h>
+
+ .macro THUNK reg
+ .section .text.__x86.indirect_thunk
+@@ -46,3 +47,58 @@ GENERATE_THUNK(r13)
+ GENERATE_THUNK(r14)
+ GENERATE_THUNK(r15)
+ #endif
++
++/*
++ * Fill the CPU return stack buffer.
++ *
++ * Each entry in the RSB, if used for a speculative 'ret', contains an
++ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
++ *
++ * This is required in various cases for retpoline and IBRS-based
++ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
++ * eliminate potentially bogus entries from the RSB, and sometimes
++ * purely to ensure that it doesn't get empty, which on some CPUs would
++ * allow predictions from other (unwanted!) sources to be used.
++ *
++ * Google experimented with loop-unrolling and this turned out to be
++ * the optimal version - two calls, each with their own speculation
++ * trap should their return address end up getting used, in a loop.
++ */
++.macro STUFF_RSB nr:req sp:req
++ mov $(\nr / 2), %_ASM_BX
++ .align 16
++771:
++ call 772f
++773: /* speculation trap */
++ pause
++ lfence
++ jmp 773b
++ .align 16
++772:
++ call 774f
++775: /* speculation trap */
++ pause
++ lfence
++ jmp 775b
++ .align 16
++774:
++ dec %_ASM_BX
++ jnz 771b
++ add $((BITS_PER_LONG/8) * \nr), \sp
++.endm
++
++#define RSB_FILL_LOOPS 16 /* To avoid underflow */
++
++ENTRY(__fill_rsb)
++ STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
++ ret
++END(__fill_rsb)
++EXPORT_SYMBOL_GPL(__fill_rsb)
++
++#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
++
++ENTRY(__clear_rsb)
++ STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
++ ret
++END(__clear_rsb)
++EXPORT_SYMBOL_GPL(__clear_rsb)
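The payoff is at the call sites: ALTERNATIVE "", "call __clear_rsb", \ftr emits NOP padding that the alternatives code patches into a single 5-byte call when the feature bit is set, instead of pasting the whole stuffing loop inline (a sketch of the two patched states per site):

    /* feature bit clear:  nop padding, no RSB stuffing
     * feature bit set:    call __clear_rsb   (or call __fill_rsb) */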
--- /dev/null
+Subject: x86/speculation: Add basic IBPB (Indirect Branch Prediction Barrier) support
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu Jan 25 16:14:15 2018 +0000
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 20ffa1caecca4db8f79fe665acdeaa5af815a24d
+
+Expose indirect_branch_prediction_barrier() for use in subsequent patches.
+
+[ tglx: Add IBPB status to spectre_v2 sysfs file ]
+
+Co-developed-by: KarimAllah Ahmed <karahmed@amazon.de>
+Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: ak@linux.intel.com
+Cc: ashok.raj@intel.com
+Cc: dave.hansen@intel.com
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1516896855-7642-8-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/cpufeatures.h | 2 ++
+ arch/x86/include/asm/nospec-branch.h | 13 +++++++++++++
+ arch/x86/kernel/cpu/bugs.c | 10 +++++++++-
+ 3 files changed, 24 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -210,6 +210,8 @@
+ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
+
++#define X86_FEATURE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled*/
++
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+ #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -218,5 +218,18 @@ static inline void vmexit_fill_RSB(void)
+ #endif
+ }
+
++static inline void indirect_branch_prediction_barrier(void)
++{
++ asm volatile(ALTERNATIVE("",
++ "movl %[msr], %%ecx\n\t"
++ "movl %[val], %%eax\n\t"
++ "movl $0, %%edx\n\t"
++ "wrmsr",
++ X86_FEATURE_IBPB)
++ : : [msr] "i" (MSR_IA32_PRED_CMD),
++ [val] "i" (PRED_CMD_IBPB)
++ : "eax", "ecx", "edx", "memory");
++}
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __NOSPEC_BRANCH_H__ */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -263,6 +263,13 @@ retpoline_auto:
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Filling RSB on context switch\n");
+ }
++
++ /* Initialize Indirect Branch Prediction Barrier if supported */
++ if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) ||
++ boot_cpu_has(X86_FEATURE_AMD_PRED_CMD)) {
++ setup_force_cpu_cap(X86_FEATURE_IBPB);
++ pr_info("Enabling Indirect Branch Prediction Barrier\n");
++ }
+ }
+
+ #undef pr_fmt
+@@ -292,7 +299,8 @@ ssize_t cpu_show_spectre_v2(struct devic
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return sprintf(buf, "Not affected\n");
+
+- return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ boot_cpu_has(X86_FEATURE_IBPB) ? ", IPBP" : "",
+ spectre_v2_bad_module ? " - vulnerable module loaded" : "");
+ }
+ #endif
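Note that thanks to the ALTERNATIVE the barrier costs only NOPs on unsupported CPUs; the logic is equivalent to the following, except that the branch is resolved once at patch time rather than on every call (a sketch):

    /* run-time equivalent of indirect_branch_prediction_barrier() */
    if (boot_cpu_has(X86_FEATURE_IBPB))
            wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);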
--- /dev/null
+Subject: x86/speculation: Simplify indirect_branch_prediction_barrier()
+From: Borislav Petkov <bp@suse.de>
+Date: Sat Jan 27 16:24:34 2018 +0000
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 64e16720ea0879f8ab4547e3b9758936d483909b
+
+Make it all a function which does the WRMSR instead of having a hairy
+inline asm.
+
+[dwmw2: export it, fix CONFIG_RETPOLINE issues]
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: ak@linux.intel.com
+Cc: dave.hansen@intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: pbonzini@redhat.com
+Cc: tim.c.chen@linux.intel.com
+Cc: gregkh@linux-foundation.org
+Link: https://lkml.kernel.org/r/1517070274-12128-4-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/x86/include/asm/nospec-branch.h | 13 ++++---------
+ arch/x86/include/asm/processor.h | 3 +++
+ arch/x86/kernel/cpu/bugs.c | 6 ++++++
+ 3 files changed, 13 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -164,15 +164,10 @@ static inline void vmexit_fill_RSB(void)
+
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+- asm volatile(ALTERNATIVE("",
+- "movl %[msr], %%ecx\n\t"
+- "movl %[val], %%eax\n\t"
+- "movl $0, %%edx\n\t"
+- "wrmsr",
+- X86_FEATURE_USE_IBPB)
+- : : [msr] "i" (MSR_IA32_PRED_CMD),
+- [val] "i" (PRED_CMD_IBPB)
+- : "eax", "ecx", "edx", "memory");
++ alternative_input("",
++ "call __ibp_barrier",
++ X86_FEATURE_USE_IBPB,
++ ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
+ }
+
+ #endif /* __ASSEMBLY__ */
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -970,4 +970,7 @@ bool xen_set_default_idle(void);
+
+ void stop_this_cpu(void *dummy);
+ void df_debug(struct pt_regs *regs, long error_code);
++
++void __ibp_barrier(void);
++
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -311,3 +311,9 @@ ssize_t cpu_show_spectre_v2(struct devic
+ spectre_v2_module_string());
+ }
+ #endif
++
++void __ibp_barrier(void)
++{
++ __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
++}
++EXPORT_SYMBOL_GPL(__ibp_barrier);