From 39b6eff80a4ee3fa1df1c383b62952158172f4c4 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 20 Feb 2018 09:47:26 +0100 Subject: [PATCH] 4.9-stable patches added patches: compiler-gcc.h-introduce-__optimize-function-attribute.patch kvm-x86-reduce-retpoline-performance-impact-in-slot_handle_level_range-by-always-inlining-iterator-helper-methods.patch x86-entry-64-compat-clear-registers-for-compat-syscalls-to-reduce-speculation-attack-surface.patch x86-nvmx-properly-set-spec_ctrl-and-pred_cmd-before-merging-msrs.patch x86-speculation-clean-up-various-spectre-related-details.patch x86-speculation-correct-speculation-control-microcode-blacklist-again.patch x86-speculation-update-speculation-control-microcode-blacklist.patch --- ...roduce-__optimize-function-attribute.patch | 48 ++++++ ...ays-inlining-iterator-helper-methods.patch | 100 ++++++++++++ queue-4.9/series | 7 + ...to-reduce-speculation-attack-surface.patch | 114 ++++++++++++++ ...trl-and-pred_cmd-before-merging-msrs.patch | 61 ++++++++ ...n-up-various-spectre-related-details.patch | 145 ++++++++++++++++++ ...on-control-microcode-blacklist-again.patch | 75 +++++++++ ...culation-control-microcode-blacklist.patch | 66 ++++++++ 8 files changed, 616 insertions(+) create mode 100644 queue-4.9/compiler-gcc.h-introduce-__optimize-function-attribute.patch create mode 100644 queue-4.9/kvm-x86-reduce-retpoline-performance-impact-in-slot_handle_level_range-by-always-inlining-iterator-helper-methods.patch create mode 100644 queue-4.9/x86-entry-64-compat-clear-registers-for-compat-syscalls-to-reduce-speculation-attack-surface.patch create mode 100644 queue-4.9/x86-nvmx-properly-set-spec_ctrl-and-pred_cmd-before-merging-msrs.patch create mode 100644 queue-4.9/x86-speculation-clean-up-various-spectre-related-details.patch create mode 100644 queue-4.9/x86-speculation-correct-speculation-control-microcode-blacklist-again.patch create mode 100644 queue-4.9/x86-speculation-update-speculation-control-microcode-blacklist.patch diff --git a/queue-4.9/compiler-gcc.h-introduce-__optimize-function-attribute.patch b/queue-4.9/compiler-gcc.h-introduce-__optimize-function-attribute.patch new file mode 100644 index 00000000000..d51d7111456 --- /dev/null +++ b/queue-4.9/compiler-gcc.h-introduce-__optimize-function-attribute.patch @@ -0,0 +1,48 @@ +From df5d45aa08f848b79caf395211b222790534ccc7 Mon Sep 17 00:00:00 2001 +From: Geert Uytterhoeven +Date: Thu, 1 Feb 2018 11:21:58 +0100 +Subject: compiler-gcc.h: Introduce __optimize function attribute + +From: Geert Uytterhoeven + +commit df5d45aa08f848b79caf395211b222790534ccc7 upstream. + +Create a new function attribute __optimize, which allows to specify an +optimization level on a per-function basis. 
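+
+As a rough illustration of how the attribute is meant to be used (the
+function below is hypothetical, not part of this change), a single hot
+routine can be built at a higher optimization level without raising the
+level for the whole translation unit:
+
+	/* Hypothetical kernel-style example; only this function gets -O3. */
+	static u32 __optimize("O3") example_hot_hash(const u8 *buf, size_t len)
+	{
+		u32 hash = 0;
+
+		while (len--)
+			hash = hash * 31 + *buf++;
+		return hash;
+	}
+
+On compilers that do not define the attribute (GCC older than 4.4), the
+compiler.h fallback below makes __optimize(level) expand to nothing, so
+such a function is simply built at the file's normal optimization level.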
+ +Signed-off-by: Geert Uytterhoeven +Acked-by: Ard Biesheuvel +Signed-off-by: Herbert Xu +Signed-off-by: Greg Kroah-Hartman + +--- + include/linux/compiler-gcc.h | 4 ++++ + include/linux/compiler.h | 4 ++++ + 2 files changed, 8 insertions(+) + +--- a/include/linux/compiler-gcc.h ++++ b/include/linux/compiler-gcc.h +@@ -187,6 +187,10 @@ + #endif /* __CHECKER__ */ + #endif /* GCC_VERSION >= 40300 */ + ++#if GCC_VERSION >= 40400 ++#define __optimize(level) __attribute__((__optimize__(level))) ++#endif /* GCC_VERSION >= 40400 */ ++ + #if GCC_VERSION >= 40500 + + #ifndef __CHECKER__ +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -469,6 +469,10 @@ static __always_inline void __write_once + # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) + #endif + ++#ifndef __optimize ++# define __optimize(level) ++#endif ++ + /* Compile time object size, -1 for unknown */ + #ifndef __compiletime_object_size + # define __compiletime_object_size(obj) -1 diff --git a/queue-4.9/kvm-x86-reduce-retpoline-performance-impact-in-slot_handle_level_range-by-always-inlining-iterator-helper-methods.patch b/queue-4.9/kvm-x86-reduce-retpoline-performance-impact-in-slot_handle_level_range-by-always-inlining-iterator-helper-methods.patch new file mode 100644 index 00000000000..71861d17c9e --- /dev/null +++ b/queue-4.9/kvm-x86-reduce-retpoline-performance-impact-in-slot_handle_level_range-by-always-inlining-iterator-helper-methods.patch @@ -0,0 +1,100 @@ +From 928a4c39484281f8ca366f53a1db79330d058401 Mon Sep 17 00:00:00 2001 +From: David Woodhouse +Date: Sat, 10 Feb 2018 23:39:24 +0000 +Subject: KVM/x86: Reduce retpoline performance impact in slot_handle_level_range(), by always inlining iterator helper methods +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: David Woodhouse + +commit 928a4c39484281f8ca366f53a1db79330d058401 upstream. + +With retpoline, tight loops of "call this function for every XXX" are +very much pessimised by taking a prediction miss *every* time. This one +is by far the biggest contributor to the guest launch time with retpoline. + +By marking the iterator slot_handle_…() functions always_inline, we can +ensure that the indirect function call can be optimised away into a +direct call and it actually generates slightly smaller code because +some of the other conditionals can get optimised away too. + +Performance is now pretty close to what we see with nospectre_v2 on +the command line. 
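+
+Reduced to a toy example (the names below are made up, this is not the
+kvm code): once a forced-inline iterator is expanded into a caller that
+passes a specific callback, the function pointer is a compile-time
+constant, so the per-iteration indirect call that would otherwise go
+through a retpoline thunk becomes a direct call:
+
+	typedef bool (*item_handler)(int item);	/* cf. slot_level_handler */
+
+	static __always_inline bool
+	for_each_item(const int *items, int n, item_handler fn)
+	{
+		bool ret = false;
+		int i;
+
+		for (i = 0; i < n; i++)
+			ret |= fn(items[i]);	/* direct call after inlining */
+		return ret;
+	}
+
+	static bool is_even(int item)
+	{
+		return (item & 1) == 0;
+	}
+
+	bool any_even(const int *items, int n)
+	{
+		/* 'fn' is the known constant is_even once inlined here. */
+		return for_each_item(items, n, is_even);
+	}
+
+A plain 'static' gives no such guarantee once the compiler decides not
+to inline, which is why the iterators are spelled __always_inline.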
+ +Suggested-by: Linus Torvalds +Tested-by: Filippo Sironi +Signed-off-by: David Woodhouse +Reviewed-by: Filippo Sironi +Acked-by: Paolo Bonzini +Cc: Andy Lutomirski +Cc: Arjan van de Ven +Cc: Borislav Petkov +Cc: Dan Williams +Cc: Dave Hansen +Cc: David Woodhouse +Cc: Greg Kroah-Hartman +Cc: Josh Poimboeuf +Cc: Peter Zijlstra +Cc: Thomas Gleixner +Cc: arjan.van.de.ven@intel.com +Cc: dave.hansen@intel.com +Cc: jmattson@google.com +Cc: karahmed@amazon.de +Cc: kvm@vger.kernel.org +Cc: rkrcmar@redhat.com +Link: http://lkml.kernel.org/r/1518305967-31356-4-git-send-email-dwmw@amazon.co.uk +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kvm/mmu.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -4640,7 +4640,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm) + typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); + + /* The caller should hold mmu-lock before calling this function. */ +-static bool ++static __always_inline bool + slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, int start_level, int end_level, + gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) +@@ -4670,7 +4670,7 @@ slot_handle_level_range(struct kvm *kvm, + return flush; + } + +-static bool ++static __always_inline bool + slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, int start_level, int end_level, + bool lock_flush_tlb) +@@ -4681,7 +4681,7 @@ slot_handle_level(struct kvm *kvm, struc + lock_flush_tlb); + } + +-static bool ++static __always_inline bool + slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, bool lock_flush_tlb) + { +@@ -4689,7 +4689,7 @@ slot_handle_all_level(struct kvm *kvm, s + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); + } + +-static bool ++static __always_inline bool + slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, bool lock_flush_tlb) + { +@@ -4697,7 +4697,7 @@ slot_handle_large_level(struct kvm *kvm, + PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); + } + +-static bool ++static __always_inline bool + slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, + slot_level_handler fn, bool lock_flush_tlb) + { diff --git a/queue-4.9/series b/queue-4.9/series index b9ae6eba924..c179b2634f3 100644 --- a/queue-4.9/series +++ b/queue-4.9/series @@ -15,3 +15,10 @@ arm-dts-nomadik-add-interrupt-parent-for-clcd.patch arm-spear600-add-missing-interrupt-parent-of-rtc.patch arm-spear13xx-fix-dmas-cells.patch arm-spear13xx-fix-spics-gpio-controller-s-warning.patch +x86-entry-64-compat-clear-registers-for-compat-syscalls-to-reduce-speculation-attack-surface.patch +compiler-gcc.h-introduce-__optimize-function-attribute.patch +x86-speculation-update-speculation-control-microcode-blacklist.patch +x86-speculation-correct-speculation-control-microcode-blacklist-again.patch +kvm-x86-reduce-retpoline-performance-impact-in-slot_handle_level_range-by-always-inlining-iterator-helper-methods.patch +x86-nvmx-properly-set-spec_ctrl-and-pred_cmd-before-merging-msrs.patch +x86-speculation-clean-up-various-spectre-related-details.patch diff --git a/queue-4.9/x86-entry-64-compat-clear-registers-for-compat-syscalls-to-reduce-speculation-attack-surface.patch b/queue-4.9/x86-entry-64-compat-clear-registers-for-compat-syscalls-to-reduce-speculation-attack-surface.patch new file mode 100644 index 00000000000..55103c84064 
--- /dev/null +++ b/queue-4.9/x86-entry-64-compat-clear-registers-for-compat-syscalls-to-reduce-speculation-attack-surface.patch @@ -0,0 +1,114 @@ +From 6b8cf5cc9965673951f1ab3f0e3cf23d06e3e2ee Mon Sep 17 00:00:00 2001 +From: Dan Williams +Date: Mon, 5 Feb 2018 17:18:17 -0800 +Subject: x86/entry/64/compat: Clear registers for compat syscalls, to reduce speculation attack surface + +From: Dan Williams + +commit 6b8cf5cc9965673951f1ab3f0e3cf23d06e3e2ee upstream. + +At entry userspace may have populated registers with values that could +otherwise be useful in a speculative execution attack. Clear them to +minimize the kernel's attack surface. + +Originally-From: Andi Kleen +Signed-off-by: Dan Williams +Cc: +Cc: Andy Lutomirski +Cc: Borislav Petkov +Cc: Brian Gerst +Cc: Denys Vlasenko +Cc: H. Peter Anvin +Cc: Josh Poimboeuf +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: Thomas Gleixner +Link: http://lkml.kernel.org/r/151787989697.7847.4083702787288600552.stgit@dwillia2-desk3.amr.corp.intel.com +[ Made small improvements to the changelog. ] +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/entry/entry_64_compat.S | 30 ++++++++++++++++++++++++++++++ + 1 file changed, 30 insertions(+) + +--- a/arch/x86/entry/entry_64_compat.S ++++ b/arch/x86/entry/entry_64_compat.S +@@ -83,15 +83,25 @@ ENTRY(entry_SYSENTER_compat) + pushq %rcx /* pt_regs->cx */ + pushq $-ENOSYS /* pt_regs->ax */ + pushq $0 /* pt_regs->r8 = 0 */ ++ xorq %r8, %r8 /* nospec r8 */ + pushq $0 /* pt_regs->r9 = 0 */ ++ xorq %r9, %r9 /* nospec r9 */ + pushq $0 /* pt_regs->r10 = 0 */ ++ xorq %r10, %r10 /* nospec r10 */ + pushq $0 /* pt_regs->r11 = 0 */ ++ xorq %r11, %r11 /* nospec r11 */ + pushq %rbx /* pt_regs->rbx */ ++ xorl %ebx, %ebx /* nospec rbx */ + pushq %rbp /* pt_regs->rbp (will be overwritten) */ ++ xorl %ebp, %ebp /* nospec rbp */ + pushq $0 /* pt_regs->r12 = 0 */ ++ xorq %r12, %r12 /* nospec r12 */ + pushq $0 /* pt_regs->r13 = 0 */ ++ xorq %r13, %r13 /* nospec r13 */ + pushq $0 /* pt_regs->r14 = 0 */ ++ xorq %r14, %r14 /* nospec r14 */ + pushq $0 /* pt_regs->r15 = 0 */ ++ xorq %r15, %r15 /* nospec r15 */ + cld + + /* +@@ -209,15 +219,25 @@ ENTRY(entry_SYSCALL_compat) + pushq %rbp /* pt_regs->cx (stashed in bp) */ + pushq $-ENOSYS /* pt_regs->ax */ + pushq $0 /* pt_regs->r8 = 0 */ ++ xorq %r8, %r8 /* nospec r8 */ + pushq $0 /* pt_regs->r9 = 0 */ ++ xorq %r9, %r9 /* nospec r9 */ + pushq $0 /* pt_regs->r10 = 0 */ ++ xorq %r10, %r10 /* nospec r10 */ + pushq $0 /* pt_regs->r11 = 0 */ ++ xorq %r11, %r11 /* nospec r11 */ + pushq %rbx /* pt_regs->rbx */ ++ xorl %ebx, %ebx /* nospec rbx */ + pushq %rbp /* pt_regs->rbp (will be overwritten) */ ++ xorl %ebp, %ebp /* nospec rbp */ + pushq $0 /* pt_regs->r12 = 0 */ ++ xorq %r12, %r12 /* nospec r12 */ + pushq $0 /* pt_regs->r13 = 0 */ ++ xorq %r13, %r13 /* nospec r13 */ + pushq $0 /* pt_regs->r14 = 0 */ ++ xorq %r14, %r14 /* nospec r14 */ + pushq $0 /* pt_regs->r15 = 0 */ ++ xorq %r15, %r15 /* nospec r15 */ + + /* + * User mode is traced as though IRQs are on, and SYSENTER +@@ -320,15 +340,25 @@ ENTRY(entry_INT80_compat) + pushq %rcx /* pt_regs->cx */ + pushq $-ENOSYS /* pt_regs->ax */ + pushq $0 /* pt_regs->r8 = 0 */ ++ xorq %r8, %r8 /* nospec r8 */ + pushq $0 /* pt_regs->r9 = 0 */ ++ xorq %r9, %r9 /* nospec r9 */ + pushq $0 /* pt_regs->r10 = 0 */ ++ xorq %r10, %r10 /* nospec r10 */ + pushq $0 /* pt_regs->r11 = 0 */ ++ xorq %r11, %r11 /* nospec r11 */ + pushq %rbx /* pt_regs->rbx */ ++ xorl %ebx, %ebx /* nospec rbx */ + pushq %rbp /* pt_regs->rbp */ ++ xorl 
%ebp, %ebp /* nospec rbp */ + pushq %r12 /* pt_regs->r12 */ ++ xorq %r12, %r12 /* nospec r12 */ + pushq %r13 /* pt_regs->r13 */ ++ xorq %r13, %r13 /* nospec r13 */ + pushq %r14 /* pt_regs->r14 */ ++ xorq %r14, %r14 /* nospec r14 */ + pushq %r15 /* pt_regs->r15 */ ++ xorq %r15, %r15 /* nospec r15 */ + cld + + /* diff --git a/queue-4.9/x86-nvmx-properly-set-spec_ctrl-and-pred_cmd-before-merging-msrs.patch b/queue-4.9/x86-nvmx-properly-set-spec_ctrl-and-pred_cmd-before-merging-msrs.patch new file mode 100644 index 00000000000..ce3035e84df --- /dev/null +++ b/queue-4.9/x86-nvmx-properly-set-spec_ctrl-and-pred_cmd-before-merging-msrs.patch @@ -0,0 +1,61 @@ +From 206587a9fb764d71f035dc7f6d3b6488f5d5b304 Mon Sep 17 00:00:00 2001 +From: KarimAllah Ahmed +Date: Sat, 10 Feb 2018 23:39:25 +0000 +Subject: X86/nVMX: Properly set spec_ctrl and pred_cmd before merging MSRs +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: KarimAllah Ahmed + +commit 206587a9fb764d71f035dc7f6d3b6488f5d5b304 upstream. + +These two variables should check whether SPEC_CTRL and PRED_CMD are +supposed to be passed through to L2 guests or not. While +msr_write_intercepted_l01 would return 'true' if it is not passed through. + +So just invert the result of msr_write_intercepted_l01 to implement the +correct semantics. + +Signed-off-by: KarimAllah Ahmed +Signed-off-by: David Woodhouse +Reviewed-by: Jim Mattson +Acked-by: Paolo Bonzini +Cc: Andy Lutomirski +Cc: Arjan van de Ven +Cc: Borislav Petkov +Cc: Dan Williams +Cc: Dave Hansen +Cc: David Woodhouse +Cc: Greg Kroah-Hartman +Cc: Josh Poimboeuf +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: Radim Krčmář +Cc: Thomas Gleixner +Cc: arjan.van.de.ven@intel.com +Cc: dave.hansen@intel.com +Cc: kvm@vger.kernel.org +Cc: sironi@amazon.de +Fixes: 086e7d4118cc ("KVM: VMX: Allow direct access to MSR_IA32_SPEC_CTRL") +Link: http://lkml.kernel.org/r/1518305967-31356-5-git-send-email-dwmw@amazon.co.uk +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kvm/vmx.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -9606,8 +9606,8 @@ static inline bool nested_vmx_merge_msr_ + * updated to reflect this when L1 (or its L2s) actually write to + * the MSR. + */ +- bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); +- bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); ++ bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); ++ bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); + + if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && + !pred_cmd && !spec_ctrl) diff --git a/queue-4.9/x86-speculation-clean-up-various-spectre-related-details.patch b/queue-4.9/x86-speculation-clean-up-various-spectre-related-details.patch new file mode 100644 index 00000000000..49127e32b63 --- /dev/null +++ b/queue-4.9/x86-speculation-clean-up-various-spectre-related-details.patch @@ -0,0 +1,145 @@ +From 21e433bdb95bdf3aa48226fd3d33af608437f293 Mon Sep 17 00:00:00 2001 +From: Ingo Molnar +Date: Tue, 13 Feb 2018 09:03:08 +0100 +Subject: x86/speculation: Clean up various Spectre related details + +From: Ingo Molnar + +commit 21e433bdb95bdf3aa48226fd3d33af608437f293 upstream. + +Harmonize all the Spectre messages so that a: + + dmesg | grep -i spectre + +... gives us most Spectre related kernel boot messages. 
+ +Also fix a few other details: + + - clarify a comment about firmware speculation control + + - s/KPTI/PTI + + - remove various line-breaks that made the code uglier + +Acked-by: David Woodhouse +Cc: Andy Lutomirski +Cc: Arjan van de Ven +Cc: Borislav Petkov +Cc: Dan Williams +Cc: Dave Hansen +Cc: David Woodhouse +Cc: Greg Kroah-Hartman +Cc: Josh Poimboeuf +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: Thomas Gleixner +Cc: linux-kernel@vger.kernel.org +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kernel/cpu/bugs.c | 28 +++++++++++----------------- + 1 file changed, 11 insertions(+), 17 deletions(-) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -161,8 +161,7 @@ static enum spectre_v2_mitigation_cmd __ + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) + return SPECTRE_V2_CMD_NONE; + else { +- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, +- sizeof(arg)); ++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_CMD_AUTO; + +@@ -174,8 +173,7 @@ static enum spectre_v2_mitigation_cmd __ + } + + if (i >= ARRAY_SIZE(mitigation_options)) { +- pr_err("unknown option (%s). Switching to AUTO select\n", +- mitigation_options[i].option); ++ pr_err("unknown option (%s). Switching to AUTO select\n", mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + } +@@ -184,8 +182,7 @@ static enum spectre_v2_mitigation_cmd __ + cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && + !IS_ENABLED(CONFIG_RETPOLINE)) { +- pr_err("%s selected but not compiled in. Switching to AUTO select\n", +- mitigation_options[i].option); ++ pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + +@@ -255,14 +252,14 @@ static void __init spectre_v2_select_mit + goto retpoline_auto; + break; + } +- pr_err("kernel not compiled with retpoline; no mitigation available!"); ++ pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); + return; + + retpoline_auto: + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + retpoline_amd: + if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { +- pr_err("LFENCE not serializing. Switching to generic retpoline\n"); ++ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); + goto retpoline_generic; + } + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : +@@ -280,7 +277,7 @@ retpoline_auto: + pr_info("%s\n", spectre_v2_strings[mode]); + + /* +- * If neither SMEP or KPTI are available, there is a risk of ++ * If neither SMEP nor PTI are available, there is a risk of + * hitting userspace addresses in the RSB after a context switch + * from a shallow call stack to a deeper one. To prevent this fill + * the entire RSB, even when using IBRS. 
+@@ -294,21 +291,20 @@ retpoline_auto: + if ((!boot_cpu_has(X86_FEATURE_KAISER) && + !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); +- pr_info("Filling RSB on context switch\n"); ++ pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); + } + + /* Initialize Indirect Branch Prediction Barrier if supported */ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); +- pr_info("Enabling Indirect Branch Prediction Barrier\n"); ++ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); + } + } + + #undef pr_fmt + + #ifdef CONFIG_SYSFS +-ssize_t cpu_show_meltdown(struct device *dev, +- struct device_attribute *attr, char *buf) ++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) + { + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + return sprintf(buf, "Not affected\n"); +@@ -317,16 +313,14 @@ ssize_t cpu_show_meltdown(struct device + return sprintf(buf, "Vulnerable\n"); + } + +-ssize_t cpu_show_spectre_v1(struct device *dev, +- struct device_attribute *attr, char *buf) ++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) + { + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) + return sprintf(buf, "Not affected\n"); + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); + } + +-ssize_t cpu_show_spectre_v2(struct device *dev, +- struct device_attribute *attr, char *buf) ++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) + { + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return sprintf(buf, "Not affected\n"); diff --git a/queue-4.9/x86-speculation-correct-speculation-control-microcode-blacklist-again.patch b/queue-4.9/x86-speculation-correct-speculation-control-microcode-blacklist-again.patch new file mode 100644 index 00000000000..0b131e254db --- /dev/null +++ b/queue-4.9/x86-speculation-correct-speculation-control-microcode-blacklist-again.patch @@ -0,0 +1,75 @@ +From d37fc6d360a404b208547ba112e7dabb6533c7fc Mon Sep 17 00:00:00 2001 +From: David Woodhouse +Date: Mon, 12 Feb 2018 15:27:34 +0000 +Subject: x86/speculation: Correct Speculation Control microcode blacklist again + +From: David Woodhouse + +commit d37fc6d360a404b208547ba112e7dabb6533c7fc upstream. + +Arjan points out that the Intel document only clears the 0xc2 microcode +on *some* parts with CPUID 506E3 (INTEL_FAM6_SKYLAKE_DESKTOP stepping 3). +For the Skylake H/S platform it's OK but for Skylake E3 which has the +same CPUID it isn't (yet) cleared. + +So removing it from the blacklist was premature. Put it back for now. + +Also, Arjan assures me that the 0x84 microcode for Kaby Lake which was +featured in one of the early revisions of the Intel document was never +released to the public, and won't be until/unless it is also validated +as safe. So those can change to 0x80 which is what all *other* versions +of the doc have identified. + +Once the retrospective testing of existing public microcodes is done, we +should be back into a mode where new microcodes are only released in +batches and we shouldn't even need to update the blacklist for those +anyway, so this tweaking of the list isn't expected to be a thing which +keeps happening. 
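+
+Roughly how such a blacklist is consulted (the struct, names and exact
+comparison below are an illustrative sketch, not the kernel's code):
+each entry keys on CPU model and stepping, and the microcode revision
+loaded on the running CPU is checked against the listed known-bad one.
+
+	struct sku_microcode_entry {		/* illustrative only */
+		u8  model;
+		u8  stepping;
+		u32 microcode;			/* known-bad revision */
+	};
+
+	static const struct sku_microcode_entry bad_spec_ctrl_ucode[] = {
+		{ 0x9e, 0x0b, 0x80 },		/* sample shape only */
+	};
+
+	static bool spec_ctrl_microcode_suspect(u8 model, u8 stepping, u32 rev)
+	{
+		unsigned int i;
+
+		for (i = 0; i < ARRAY_SIZE(bad_spec_ctrl_ucode); i++) {
+			if (bad_spec_ctrl_ucode[i].model == model &&
+			    bad_spec_ctrl_ucode[i].stepping == stepping)
+				/* listed revision (and older) treated as suspect */
+				return rev <= bad_spec_ctrl_ucode[i].microcode;
+		}
+		return false;
+	}
+
+When such a check fires, the kernel avoids relying on the speculation
+control (IBRS/IBPB) support that the suspect microcode advertises, which
+is why keeping the table accurate matters even though the mechanism
+itself is tiny.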
+ +Requested-by: Arjan van de Ven +Signed-off-by: David Woodhouse +Cc: Andy Lutomirski +Cc: Arjan van de Ven +Cc: Borislav Petkov +Cc: Dan Williams +Cc: Dave Hansen +Cc: David Woodhouse +Cc: Greg Kroah-Hartman +Cc: Josh Poimboeuf +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: Thomas Gleixner +Cc: arjan.van.de.ven@intel.com +Cc: dave.hansen@intel.com +Cc: kvm@vger.kernel.org +Cc: pbonzini@redhat.com +Link: http://lkml.kernel.org/r/1518449255-2182-1-git-send-email-dwmw@amazon.co.uk +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kernel/cpu/intel.c | 11 ++++++----- + 1 file changed, 6 insertions(+), 5 deletions(-) + +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -75,13 +75,14 @@ struct sku_microcode { + u32 microcode; + }; + static const struct sku_microcode spectre_bad_microcodes[] = { +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 }, +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 }, +- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 }, +- { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 }, +- { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 }, ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, ++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, ++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, ++ { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, + { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, diff --git a/queue-4.9/x86-speculation-update-speculation-control-microcode-blacklist.patch b/queue-4.9/x86-speculation-update-speculation-control-microcode-blacklist.patch new file mode 100644 index 00000000000..3b2da75d289 --- /dev/null +++ b/queue-4.9/x86-speculation-update-speculation-control-microcode-blacklist.patch @@ -0,0 +1,66 @@ +From 1751342095f0d2b36fa8114d8e12c5688c455ac4 Mon Sep 17 00:00:00 2001 +From: David Woodhouse +Date: Sat, 10 Feb 2018 23:39:22 +0000 +Subject: x86/speculation: Update Speculation Control microcode blacklist + +From: David Woodhouse + +commit 1751342095f0d2b36fa8114d8e12c5688c455ac4 upstream. + +Intel have retroactively blessed the 0xc2 microcode on Skylake mobile +and desktop parts, and the Gemini Lake 0x22 microcode is apparently fine +too. We blacklisted the latter purely because it was present with all +the other problematic ones in the 2018-01-08 release, but now it's +explicitly listed as OK. + +We still list 0x84 for the various Kaby Lake / Coffee Lake parts, as +that appeared in one version of the blacklist and then reverted to +0x80 again. We can change it if 0x84 is actually announced to be safe. 
+ +Signed-off-by: David Woodhouse +Cc: Andy Lutomirski +Cc: Arjan van de Ven +Cc: Borislav Petkov +Cc: Dan Williams +Cc: Dave Hansen +Cc: David Woodhouse +Cc: Greg Kroah-Hartman +Cc: Josh Poimboeuf +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: Thomas Gleixner +Cc: arjan.van.de.ven@intel.com +Cc: jmattson@google.com +Cc: karahmed@amazon.de +Cc: kvm@vger.kernel.org +Cc: pbonzini@redhat.com +Cc: rkrcmar@redhat.com +Cc: sironi@amazon.de +Link: http://lkml.kernel.org/r/1518305967-31356-2-git-send-email-dwmw@amazon.co.uk +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kernel/cpu/intel.c | 4 ---- + 1 file changed, 4 deletions(-) + +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -82,8 +82,6 @@ static const struct sku_microcode spectr + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 }, + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, +- { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 }, +- { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, + { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, +@@ -95,8 +93,6 @@ static const struct sku_microcode spectr + { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, + { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, + { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, +- /* Updated in the 20180108 release; blacklist until we know otherwise */ +- { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 }, + /* Observed in the wild */ + { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, + { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, -- 2.47.3