--- /dev/null
+From d9afaaa4ff7af8b87d4a205e48cb8a6f666d7f01 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+Date: Thu, 1 Feb 2018 11:21:59 +0100
+Subject: compiler-gcc.h: __nostackprotector needs gcc-4.4 and up
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+commit d9afaaa4ff7af8b87d4a205e48cb8a6f666d7f01 upstream.
+
+GCC versions before 4.4 do not recognize the __optimize__ compiler
+attribute:
+
+ warning: ‘__optimize__’ attribute directive ignored
+
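+Moving the definition under the existing GCC_VERSION >= 40400 guard,
+next to __optimize itself, avoids the warning. A hypothetical user of
+the attribute, for illustration only:
+
+	/* Must not get stack-protector instrumentation, e.g. because
+	 * it runs before the stack canary is initialized. */
+	static void __nostackprotector early_boot_setup(void)
+	{
+		/* ... */
+	}
+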
+Fixes: 7375ae3a0b79ea07 ("compiler-gcc.h: Introduce __nostackprotector function attribute")
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/compiler-gcc.h | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -167,8 +167,6 @@
+
+ #if GCC_VERSION >= 40100
+ # define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+-
+-#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
+ #endif
+
+ #if GCC_VERSION >= 40300
+@@ -198,6 +196,7 @@
+
+ #if GCC_VERSION >= 40400
+ #define __optimize(level) __attribute__((__optimize__(level)))
++#define __nostackprotector __optimize("no-stack-protector")
+ #endif /* GCC_VERSION >= 40400 */
+
+ #if GCC_VERSION >= 40500
--- /dev/null
+From df5d45aa08f848b79caf395211b222790534ccc7 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+Date: Thu, 1 Feb 2018 11:21:58 +0100
+Subject: compiler-gcc.h: Introduce __optimize function attribute
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+commit df5d45aa08f848b79caf395211b222790534ccc7 upstream.
+
+Create a new function attribute, __optimize, which allows specifying
+an optimization level on a per-function basis.
+
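+A sketch of how the attribute would be used (function name and level
+are illustrative only):
+
+	/* Build this one function at -O3 regardless of the global
+	 * optimization level. Needs GCC >= 4.4; on other compilers
+	 * the annotation expands to nothing (see the compiler.h
+	 * fallback below). */
+	static int __optimize("O3") mix(int a, int b)
+	{
+		return a * 31 + b;
+	}
+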
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/compiler-gcc.h | 4 ++++
+ include/linux/compiler.h | 4 ++++
+ 2 files changed, 8 insertions(+)
+
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -196,6 +196,10 @@
+ #endif /* __CHECKER__ */
+ #endif /* GCC_VERSION >= 40300 */
+
++#if GCC_VERSION >= 40400
++#define __optimize(level) __attribute__((__optimize__(level)))
++#endif /* GCC_VERSION >= 40400 */
++
+ #if GCC_VERSION >= 40500
+
+ #ifndef __CHECKER__
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -271,6 +271,10 @@ static __always_inline void __write_once
+
+ #endif /* __ASSEMBLY__ */
+
++#ifndef __optimize
++# define __optimize(level)
++#endif
++
+ /* Compile time object size, -1 for unknown */
+ #ifndef __compiletime_object_size
+ # define __compiletime_object_size(obj) -1
--- /dev/null
+From 2e7d1d61ea6c0f1c4da5eb82cafac750d55637a7 Mon Sep 17 00:00:00 2001
+From: Artem Savkov <artem.savkov@gmail.com>
+Date: Tue, 6 Feb 2018 22:20:22 +0100
+Subject: crypto: sun4i_ss_prng - convert lock to _bh in sun4i_ss_prng_generate
+
+From: Artem Savkov <artem.savkov@gmail.com>
+
+commit 2e7d1d61ea6c0f1c4da5eb82cafac750d55637a7 upstream.
+
+Lockdep detects a possible deadlock in sun4i_ss_prng_generate() and
+throws an "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage" warning.
+Disable softirqs while holding the lock to fix this.
+
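+The problematic interleaving, as a sketch (assumed scenario, on a
+single CPU):
+
+	/* process context                 softirq, same CPU
+	 *
+	 * spin_lock(&ss->slock);
+	 *   <softirq preempts here>
+	 *                                 spin_lock(&ss->slock);
+	 *                                 -> spins forever: deadlock
+	 */
+
+Taking the lock with spin_lock_bh() disables bottom halves for the
+duration of the critical section, so a softirq cannot preempt the
+lock holder.
+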
+Fixes: b8ae5c7387ad ("crypto: sun4i-ss - support the Security System PRNG")
+Signed-off-by: Artem Savkov <artem.savkov@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
+@@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
+ ss = algt->ss;
+
+- spin_lock(&ss->slock);
++ spin_lock_bh(&ss->slock);
+
+ writel(mode, ss->base + SS_CTL);
+
+@@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto
+ }
+
+ writel(0, ss->base + SS_CTL);
+- spin_unlock(&ss->slock);
++ spin_unlock_bh(&ss->slock);
+ return 0;
+ }
--- /dev/null
+From dd78c832ffaf86eb6434e56de4bc3bc31f03f771 Mon Sep 17 00:00:00 2001
+From: Artem Savkov <artem.savkov@gmail.com>
+Date: Tue, 6 Feb 2018 22:20:21 +0100
+Subject: crypto: sun4i_ss_prng - fix return value of sun4i_ss_prng_generate
+
+From: Artem Savkov <artem.savkov@gmail.com>
+
+commit dd78c832ffaf86eb6434e56de4bc3bc31f03f771 upstream.
+
+According to crypto/rng.h, a generate function should return 0 on
+success and < 0 on error.
+
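+For reference, the generate callback in struct rng_alg has this shape,
+and its return value is an error code, not a byte count:
+
+	int (*generate)(struct crypto_rng *tfm,
+			const u8 *src, unsigned int slen,
+			u8 *dst, unsigned int dlen);
+	/* Return: 0 on success, < 0 on error */
+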
+Fixes: b8ae5c7387ad ("crypto: sun4i-ss - support the Security System PRNG")
+Signed-off-by: Artem Savkov <artem.savkov@gmail.com>
+Acked-by: Corentin Labbe <clabbe.montjoie@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
+@@ -52,5 +52,5 @@ int sun4i_ss_prng_generate(struct crypto
+
+ writel(0, ss->base + SS_CTL);
+ spin_unlock(&ss->slock);
+- return dlen;
++ return 0;
+ }
--- /dev/null
+From 3712caeb14dcb33fb4d5114f14c0beef10aca101 Mon Sep 17 00:00:00 2001
+From: KarimAllah Ahmed <karahmed@amazon.de>
+Date: Sat, 10 Feb 2018 23:39:26 +0000
+Subject: KVM/nVMX: Set the CPU_BASED_USE_MSR_BITMAPS if we have a valid L02 MSR bitmap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: KarimAllah Ahmed <karahmed@amazon.de>
+
+commit 3712caeb14dcb33fb4d5114f14c0beef10aca101 upstream.
+
+We either clear the CPU_BASED_USE_MSR_BITMAPS control and end up
+intercepting all MSR accesses, or we create a valid L02 MSR bitmap and
+use that. This decision has to be made every time we evaluate whether
+we are going to generate the L02 MSR bitmap.
+
+Before commit:
+
+ d28b387fb74d ("KVM/VMX: Allow direct access to MSR_IA32_SPEC_CTRL")
+
+... this was probably OK since the decision was always identical.
+
+This is no longer the case, since the MSR bitmap can actually change
+once we decide not to intercept SPEC_CTRL and PRED_CMD. On success the
+CPU_BASED_USE_MSR_BITMAPS bit must therefore be set explicitly again,
+not silently left in whatever state the previous evaluation chose.
+
+Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: arjan.van.de.ven@intel.com
+Cc: dave.hansen@intel.com
+Cc: jmattson@google.com
+Cc: kvm@vger.kernel.org
+Cc: sironi@amazon.de
+Link: http://lkml.kernel.org/r/1518305967-31356-6-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -10131,7 +10131,8 @@ static void nested_get_vmcs12_pages(stru
+ if (cpu_has_vmx_msr_bitmap() &&
+ nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) &&
+ nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+- ;
++ vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
++ CPU_BASED_USE_MSR_BITMAPS);
+ else
+ vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_USE_MSR_BITMAPS);
--- /dev/null
+From 928a4c39484281f8ca366f53a1db79330d058401 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sat, 10 Feb 2018 23:39:24 +0000
+Subject: KVM/x86: Reduce retpoline performance impact in slot_handle_level_range(), by always inlining iterator helper methods
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 928a4c39484281f8ca366f53a1db79330d058401 upstream.
+
+With retpoline, tight loops of "call this function for every XXX" are
+very much pessimised by taking a prediction miss *every* time. This one
+is by far the biggest contributor to the guest launch time with retpoline.
+
+By marking the iterator slot_handle_…() functions always_inline, we can
+ensure that the indirect function call is optimised away into a direct
+call. It actually generates slightly smaller code, because some of the
+other conditionals can be optimised away too.
+
+Performance is now pretty close to what we see with nospectre_v2 on
+the command line.
+
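+A minimal sketch of the effect (names and types are illustrative, not
+the KVM code):
+
+	typedef bool (*slot_handler)(long x);
+
+	static __always_inline bool walk(slot_handler fn, long n)
+	{
+		bool flush = false;
+		long i;
+
+		for (i = 0; i < n; i++)
+			flush |= fn(i);	/* indirect call in the loop */
+		return flush;
+	}
+
+	static bool clear_dirty(long x) { return x & 1; }
+
+	bool walk_clear_dirty(long n)
+	{
+		/* Once walk() is inlined here, fn is the compile-time
+		 * constant clear_dirty, so the compiler can emit a
+		 * direct call (or inline it) and the hot loop needs
+		 * no retpoline thunk. */
+		return walk(clear_dirty, n);
+	}
+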
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Tested-by: Filippo Sironi <sironi@amazon.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Reviewed-by: Filippo Sironi <sironi@amazon.de>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: arjan.van.de.ven@intel.com
+Cc: dave.hansen@intel.com
+Cc: jmattson@google.com
+Cc: karahmed@amazon.de
+Cc: kvm@vger.kernel.org
+Cc: rkrcmar@redhat.com
+Link: http://lkml.kernel.org/r/1518305967-31356-4-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5058,7 +5058,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
+ typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+
+ /* The caller should hold mmu-lock before calling this function. */
+-static bool
++static __always_inline bool
+ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, int start_level, int end_level,
+ gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+@@ -5088,7 +5088,7 @@ slot_handle_level_range(struct kvm *kvm,
+ return flush;
+ }
+
+-static bool
++static __always_inline bool
+ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, int start_level, int end_level,
+ bool lock_flush_tlb)
+@@ -5099,7 +5099,7 @@ slot_handle_level(struct kvm *kvm, struc
+ lock_flush_tlb);
+ }
+
+-static bool
++static __always_inline bool
+ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, bool lock_flush_tlb)
+ {
+@@ -5107,7 +5107,7 @@ slot_handle_all_level(struct kvm *kvm, s
+ PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+ }
+
+-static bool
++static __always_inline bool
+ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, bool lock_flush_tlb)
+ {
+@@ -5115,7 +5115,7 @@ slot_handle_large_level(struct kvm *kvm,
+ PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+ }
+
+-static bool
++static __always_inline bool
+ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, bool lock_flush_tlb)
+ {
--- /dev/null
+From 4dd5f8a99e791a8c6500e3592f3ce81ae7edcde1 Mon Sep 17 00:00:00 2001
+From: Balbir Singh <bsingharora@gmail.com>
+Date: Wed, 7 Feb 2018 17:35:51 +1100
+Subject: powerpc/mm/radix: Split linear mapping on hot-unplug
+
+From: Balbir Singh <bsingharora@gmail.com>
+
+commit 4dd5f8a99e791a8c6500e3592f3ce81ae7edcde1 upstream.
+
+This patch splits the linear mapping if the hot-unplug range is
+smaller than the mapping size. The code detects if the mapping needs
+to be split into a smaller size and if so, uses the stop machine
+infrastructure to clear the existing mapping and then remap the
+remaining range using a smaller page size.
+
+The code will skip any region of the mapping that overlaps with kernel
+text and warn about it once. We don't want to remove a mapping where
+the kernel text and the LMB we intend to remove overlap in the same
+TLB mapping as it may affect the currently executing code.
+
+I've tested these changes under a KVM guest with 2 vcpus. From a
+split-mapping point of view, some of the caveats mentioned above
+applied to the testing I did.
+
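+stop_machine() is what makes the clear-and-remap safe: the callback
+runs on one CPU while all other CPUs spin with interrupts disabled, so
+nothing can be executing through the mapping while it is rewritten. A
+minimal sketch of the API as used here (assumed usage):
+
+	static int do_remap(void *data)
+	{
+		/* runs with every other CPU held off */
+		return 0;
+	}
+	...
+	stop_machine(do_remap, &args, NULL);	/* NULL: run on any CPU */
+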
+Fixes: 4b5d62ca17a1 ("powerpc/mm: add radix__remove_section_mapping()")
+Signed-off-by: Balbir Singh <bsingharora@gmail.com>
+[mpe: Tweak change log to match updated behaviour]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/pgtable-radix.c | 95 +++++++++++++++++++++++++++++++---------
+ 1 file changed, 74 insertions(+), 21 deletions(-)
+
+--- a/arch/powerpc/mm/pgtable-radix.c
++++ b/arch/powerpc/mm/pgtable-radix.c
+@@ -17,6 +17,7 @@
+ #include <linux/of_fdt.h>
+ #include <linux/mm.h>
+ #include <linux/string_helpers.h>
++#include <linux/stop_machine.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -671,6 +672,30 @@ static void free_pmd_table(pmd_t *pmd_st
+ pud_clear(pud);
+ }
+
++struct change_mapping_params {
++ pte_t *pte;
++ unsigned long start;
++ unsigned long end;
++ unsigned long aligned_start;
++ unsigned long aligned_end;
++};
++
++static int stop_machine_change_mapping(void *data)
++{
++ struct change_mapping_params *params =
++ (struct change_mapping_params *)data;
++
++ if (!data)
++ return -1;
++
++ spin_unlock(&init_mm.page_table_lock);
++ pte_clear(&init_mm, params->aligned_start, params->pte);
++ create_physical_mapping(params->aligned_start, params->start);
++ create_physical_mapping(params->end, params->aligned_end);
++ spin_lock(&init_mm.page_table_lock);
++ return 0;
++}
++
+ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+ unsigned long end)
+ {
+@@ -699,6 +724,52 @@ static void remove_pte_table(pte_t *pte_
+ }
+ }
+
++/*
++ * Helper to clear the PTE and potentially split the mapping.
++ */
++static void split_kernel_mapping(unsigned long addr, unsigned long end,
++ unsigned long size, pte_t *pte)
++{
++ unsigned long mask = ~(size - 1);
++ unsigned long aligned_start = addr & mask;
++ unsigned long aligned_end = addr + size;
++ struct change_mapping_params params;
++ bool split_region = false;
++
++ if ((end - addr) < size) {
++ /*
++ * We're going to clear the PTE, but we have not yet
++ * flushed the mapping, so it is time to remap and
++ * flush. If the effects are visible outside the
++ * processor, or if we are running in code close to
++ * the mapping we cleared, we are in trouble.
++ */
++ if (overlaps_kernel_text(aligned_start, addr) ||
++ overlaps_kernel_text(end, aligned_end)) {
++ /*
++ * Hack, just return, don't pte_clear
++ */
++ WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
++ "text, not splitting\n", addr, end);
++ return;
++ }
++ split_region = true;
++ }
++
++ if (split_region) {
++ params.pte = pte;
++ params.start = addr;
++ params.end = end;
++ params.aligned_start = addr & ~(size - 1);
++ params.aligned_end = min_t(unsigned long, aligned_end,
++ (unsigned long)__va(memblock_end_of_DRAM()));
++ stop_machine(stop_machine_change_mapping, &params, NULL);
++ return;
++ }
++
++ pte_clear(&init_mm, addr, pte);
++}
++
+ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+ unsigned long end)
+ {
+@@ -714,13 +785,7 @@ static void remove_pmd_table(pmd_t *pmd_
+ continue;
+
+ if (pmd_huge(*pmd)) {
+- if (!IS_ALIGNED(addr, PMD_SIZE) ||
+- !IS_ALIGNED(next, PMD_SIZE)) {
+- WARN_ONCE(1, "%s: unaligned range\n", __func__);
+- continue;
+- }
+-
+- pte_clear(&init_mm, addr, (pte_t *)pmd);
++ split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
+ continue;
+ }
+
+@@ -745,13 +810,7 @@ static void remove_pud_table(pud_t *pud_
+ continue;
+
+ if (pud_huge(*pud)) {
+- if (!IS_ALIGNED(addr, PUD_SIZE) ||
+- !IS_ALIGNED(next, PUD_SIZE)) {
+- WARN_ONCE(1, "%s: unaligned range\n", __func__);
+- continue;
+- }
+-
+- pte_clear(&init_mm, addr, (pte_t *)pud);
++ split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
+ continue;
+ }
+
+@@ -777,13 +836,7 @@ static void remove_pagetable(unsigned lo
+ continue;
+
+ if (pgd_huge(*pgd)) {
+- if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
+- !IS_ALIGNED(next, PGDIR_SIZE)) {
+- WARN_ONCE(1, "%s: unaligned range\n", __func__);
+- continue;
+- }
+-
+- pte_clear(&init_mm, addr, (pte_t *)pgd);
++ split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
+ continue;
+ }
+
--- /dev/null
+From f208820a321f9b23d77d7eed89945d862d62a3ed Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sat, 10 Feb 2018 23:39:23 +0000
+Subject: Revert "x86/speculation: Simplify indirect_branch_prediction_barrier()"
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit f208820a321f9b23d77d7eed89945d862d62a3ed upstream.
+
+This reverts commit 64e16720ea0879f8ab4547e3b9758936d483909b.
+
+We cannot call C functions like that, without marking all the
+call-clobbered registers as, well, clobbered. We might have got away
+with it for now because the __ibp_barrier() function was *fairly*
+unlikely to actually use any other registers. But no. Just no.
+
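+A minimal sketch of the difference (not the kernel's actual macros):
+
+	/* Unsound: a "call" emitted from inline asm clobbers rax,
+	 * rcx, rdx, rsi, rdi, r8-r11 and flags, and every one of
+	 * them would have to be declared for the compiler to cope. */
+
+	/* Sound: emit the wrmsr directly and declare only the
+	 * registers it really uses. */
+	unsigned int lo = PRED_CMD_IBPB, hi = 0;
+
+	asm volatile("wrmsr"
+		     : : "c" (MSR_IA32_PRED_CMD), "a" (lo), "d" (hi)
+		     : "memory");
+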
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: arjan.van.de.ven@intel.com
+Cc: dave.hansen@intel.com
+Cc: jmattson@google.com
+Cc: karahmed@amazon.de
+Cc: kvm@vger.kernel.org
+Cc: pbonzini@redhat.com
+Cc: rkrcmar@redhat.com
+Cc: sironi@amazon.de
+Link: http://lkml.kernel.org/r/1518305967-31356-3-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/nospec-branch.h | 13 +++++++++----
+ arch/x86/include/asm/processor.h | 3 ---
+ arch/x86/kernel/cpu/bugs.c | 6 ------
+ 3 files changed, 9 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -164,10 +164,15 @@ static inline void vmexit_fill_RSB(void)
+
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+- alternative_input("",
+- "call __ibp_barrier",
+- X86_FEATURE_USE_IBPB,
+- ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
++ asm volatile(ALTERNATIVE("",
++ "movl %[msr], %%ecx\n\t"
++ "movl %[val], %%eax\n\t"
++ "movl $0, %%edx\n\t"
++ "wrmsr",
++ X86_FEATURE_USE_IBPB)
++ : : [msr] "i" (MSR_IA32_PRED_CMD),
++ [val] "i" (PRED_CMD_IBPB)
++ : "eax", "ecx", "edx", "memory");
+ }
+
+ #endif /* __ASSEMBLY__ */
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -969,7 +969,4 @@ bool xen_set_default_idle(void);
+
+ void stop_this_cpu(void *dummy);
+ void df_debug(struct pt_regs *regs, long error_code);
+-
+-void __ibp_barrier(void);
+-
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -337,9 +337,3 @@ ssize_t cpu_show_spectre_v2(struct devic
+ spectre_v2_module_string());
+ }
+ #endif
+-
+-void __ibp_barrier(void)
+-{
+- __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
+-}
+-EXPORT_SYMBOL_GPL(__ibp_barrier);
x86-xen-init-gs-very-early-to-avoid-page-faults-with-stack-protector.patch
x86-pm-make-apm-idle-driver-initialize-polling-state.patch
mm-memory_hotplug-fix-memmap-initialization.patch
+x86-entry-64-clear-extra-registers-beyond-syscall-arguments-to-reduce-speculation-attack-surface.patch
+x86-entry-64-compat-clear-registers-for-compat-syscalls-to-reduce-speculation-attack-surface.patch
+compiler-gcc.h-introduce-__optimize-function-attribute.patch
+compiler-gcc.h-__nostackprotector-needs-gcc-4.4-and-up.patch
+crypto-sun4i_ss_prng-fix-return-value-of-sun4i_ss_prng_generate.patch
+crypto-sun4i_ss_prng-convert-lock-to-_bh-in-sun4i_ss_prng_generate.patch
+powerpc-mm-radix-split-linear-mapping-on-hot-unplug.patch
+x86-mm-pti-fix-pti-comment-in-entry_syscall_64.patch
+x86-speculation-update-speculation-control-microcode-blacklist.patch
+x86-speculation-correct-speculation-control-microcode-blacklist-again.patch
+revert-x86-speculation-simplify-indirect_branch_prediction_barrier.patch
+kvm-x86-reduce-retpoline-performance-impact-in-slot_handle_level_range-by-always-inlining-iterator-helper-methods.patch
+x86-nvmx-properly-set-spec_ctrl-and-pred_cmd-before-merging-msrs.patch
+kvm-nvmx-set-the-cpu_based_use_msr_bitmaps-if-we-have-a-valid-l02-msr-bitmap.patch
+x86-speculation-clean-up-various-spectre-related-details.patch
--- /dev/null
+From 8e1eb3fa009aa7c0b944b3c8b26b07de0efb3200 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 5 Feb 2018 17:18:05 -0800
+Subject: x86/entry/64: Clear extra registers beyond syscall arguments, to reduce speculation attack surface
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 8e1eb3fa009aa7c0b944b3c8b26b07de0efb3200 upstream.
+
+At entry userspace may have (maliciously) populated the extra registers
+outside the syscall calling convention with arbitrary values that could
+be useful in a speculative execution (Spectre style) attack.
+
+Clear these registers to minimize the kernel's attack surface.
+
+Note, this only clears the extra registers, not the unused registers
+for syscalls with fewer than six arguments, since those registers are
+likely to be clobbered well before their values could be put to use
+under speculation.
+
+Note, Linus found that the XOR instructions can be executed with
+minimized cost if interleaved with the PUSH instructions, and Ingo's
+analysis found that R10 and R11 should be included in the register
+clearing beyond the typical 'extra' syscall calling convention
+registers.
+
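+XOR-ing a register with itself is the canonical x86 zeroing idiom, and
+a 32-bit write also clears the upper half of the 64-bit register,
+which is why the patch can mix xorl and xorq. A sketch:
+
+	pushq	%rbx		/* save rbx into pt_regs ... */
+	xorl	%ebx, %ebx	/* ... then zero it; the 32-bit write
+				 * clears bits 63:32 of rbx as well */
+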
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Reported-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Cc: <stable@vger.kernel.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/151787988577.7847.16733592218894189003.stgit@dwillia2-desk3.amr.corp.intel.com
+[ Made small improvements to the changelog and the code comments. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/entry_64.S | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -235,13 +235,26 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
+ pushq %r8 /* pt_regs->r8 */
+ pushq %r9 /* pt_regs->r9 */
+ pushq %r10 /* pt_regs->r10 */
++ /*
++ * Clear extra registers that a speculation attack might
++ * otherwise want to exploit. Interleave XOR with PUSH
++ * for better uop scheduling:
++ */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq %r11 /* pt_regs->r11 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq %r12 /* pt_regs->r12 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq %r13 /* pt_regs->r13 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq %r14 /* pt_regs->r14 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq %r15 /* pt_regs->r15 */
++ xorq %r15, %r15 /* nospec r15 */
+ UNWIND_HINT_REGS
+
+ TRACE_IRQS_OFF
--- /dev/null
+From 6b8cf5cc9965673951f1ab3f0e3cf23d06e3e2ee Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Mon, 5 Feb 2018 17:18:17 -0800
+Subject: x86/entry/64/compat: Clear registers for compat syscalls, to reduce speculation attack surface
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 6b8cf5cc9965673951f1ab3f0e3cf23d06e3e2ee upstream.
+
+At entry userspace may have populated registers with values that could
+otherwise be useful in a speculative execution attack. Clear them to
+minimize the kernel's attack surface.
+
+Originally-From: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Cc: <stable@vger.kernel.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/151787989697.7847.4083702787288600552.stgit@dwillia2-desk3.amr.corp.intel.com
+[ Made small improvements to the changelog. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/entry_64_compat.S | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat)
+ pushq %rcx /* pt_regs->cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
++ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
++ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp (will be overwritten) */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
++ xorq %r15, %r15 /* nospec r15 */
+ cld
+
+ /*
+@@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram
+ pushq %rbp /* pt_regs->cx (stashed in bp) */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
++ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
++ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp (will be overwritten) */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
++ xorq %r15, %r15 /* nospec r15 */
+
+ /*
+ * User mode is traced as though IRQs are on, and SYSENTER
+@@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat)
+ pushq %rcx /* pt_regs->cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
++ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
++ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq %r12 /* pt_regs->r12 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq %r13 /* pt_regs->r13 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq %r14 /* pt_regs->r14 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq %r15 /* pt_regs->r15 */
++ xorq %r15, %r15 /* nospec r15 */
+ cld
+
+ /*
--- /dev/null
+From 14b1fcc62043729d12e8ae00f8297ab2ffe9fa91 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Fri, 9 Feb 2018 09:06:38 -0800
+Subject: x86/mm/pti: Fix PTI comment in entry_SYSCALL_64()
+
+From: Nadav Amit <namit@vmware.com>
+
+commit 14b1fcc62043729d12e8ae00f8297ab2ffe9fa91 upstream.
+
+The comment is confusing: the path is in fact taken when
+CONFIG_PAGE_TABLE_ISOLATION is disabled, while the comment says it is
+not taken.
+
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: nadav.amit@gmail.com
+Link: http://lkml.kernel.org/r/20180209170638.15161-1-namit@vmware.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/entry_64.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -213,7 +213,7 @@ ENTRY(entry_SYSCALL_64)
+
+ swapgs
+ /*
+- * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
++ * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
+ * is not required to switch CR3.
+ */
+ movq %rsp, PER_CPU_VAR(rsp_scratch)
--- /dev/null
+From 206587a9fb764d71f035dc7f6d3b6488f5d5b304 Mon Sep 17 00:00:00 2001
+From: KarimAllah Ahmed <karahmed@amazon.de>
+Date: Sat, 10 Feb 2018 23:39:25 +0000
+Subject: X86/nVMX: Properly set spec_ctrl and pred_cmd before merging MSRs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: KarimAllah Ahmed <karahmed@amazon.de>
+
+commit 206587a9fb764d71f035dc7f6d3b6488f5d5b304 upstream.
+
+These two variables are meant to indicate whether SPEC_CTRL and
+PRED_CMD are supposed to be passed through to L2 guests or not.
+However, msr_write_intercepted_l01() returns 'true' if the MSR is
+*not* passed through.
+
+So just invert the result of msr_write_intercepted_l01() to implement
+the correct semantics.
+
+Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: arjan.van.de.ven@intel.com
+Cc: dave.hansen@intel.com
+Cc: kvm@vger.kernel.org
+Cc: sironi@amazon.de
+Fixes: 086e7d4118cc ("KVM: VMX: Allow direct access to MSR_IA32_SPEC_CTRL")
+Link: http://lkml.kernel.org/r/1518305967-31356-5-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -10220,8 +10220,8 @@ static inline bool nested_vmx_merge_msr_
+ * updated to reflect this when L1 (or its L2s) actually write to
+ * the MSR.
+ */
+- bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
+- bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
++ bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
++ bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
+
+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+ !pred_cmd && !spec_ctrl)
--- /dev/null
+From 21e433bdb95bdf3aa48226fd3d33af608437f293 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Tue, 13 Feb 2018 09:03:08 +0100
+Subject: x86/speculation: Clean up various Spectre related details
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit 21e433bdb95bdf3aa48226fd3d33af608437f293 upstream.
+
+Harmonize all the Spectre messages so that a:
+
+ dmesg | grep -i spectre
+
+... gives us most Spectre related kernel boot messages.
+
+Also fix a few other details:
+
+ - clarify a comment about firmware speculation control
+
+ - s/KPTI/PTI
+
+ - remove various line-breaks that made the code uglier
+
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/bugs.c | 28 +++++++++++-----------------
+ 1 file changed, 11 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __
+ if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+ return SPECTRE_V2_CMD_NONE;
+ else {
+- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+- sizeof(arg));
++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
+ if (ret < 0)
+ return SPECTRE_V2_CMD_AUTO;
+
+@@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __
+ }
+
+ if (i >= ARRAY_SIZE(mitigation_options)) {
+- pr_err("unknown option (%s). Switching to AUTO select\n",
+- mitigation_options[i].option);
++ pr_err("unknown option (%s). Switching to AUTO select\n", mitigation_options[i].option);
+ return SPECTRE_V2_CMD_AUTO;
+ }
+ }
+@@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __
+ cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
+ cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
+ !IS_ENABLED(CONFIG_RETPOLINE)) {
+- pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+- mitigation_options[i].option);
++ pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
+ return SPECTRE_V2_CMD_AUTO;
+ }
+
+@@ -256,14 +253,14 @@ static void __init spectre_v2_select_mit
+ goto retpoline_auto;
+ break;
+ }
+- pr_err("kernel not compiled with retpoline; no mitigation available!");
++ pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
+ return;
+
+ retpoline_auto:
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ retpoline_amd:
+ if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+- pr_err("LFENCE not serializing. Switching to generic retpoline\n");
++ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
+ goto retpoline_generic;
+ }
+ mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+@@ -281,7 +278,7 @@ retpoline_auto:
+ pr_info("%s\n", spectre_v2_strings[mode]);
+
+ /*
+- * If neither SMEP or KPTI are available, there is a risk of
++ * If neither SMEP nor PTI are available, there is a risk of
+ * hitting userspace addresses in the RSB after a context switch
+ * from a shallow call stack to a deeper one. To prevent this fill
+ * the entire RSB, even when using IBRS.
+@@ -295,21 +292,20 @@ retpoline_auto:
+ if ((!boot_cpu_has(X86_FEATURE_PTI) &&
+ !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+- pr_info("Filling RSB on context switch\n");
++ pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
+ }
+
+ /* Initialize Indirect Branch Prediction Barrier if supported */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+- pr_info("Enabling Indirect Branch Prediction Barrier\n");
++ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
+ }
+ }
+
+ #undef pr_fmt
+
+ #ifdef CONFIG_SYSFS
+-ssize_t cpu_show_meltdown(struct device *dev,
+- struct device_attribute *attr, char *buf)
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+ return sprintf(buf, "Not affected\n");
+@@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device
+ return sprintf(buf, "Vulnerable\n");
+ }
+
+-ssize_t cpu_show_spectre_v1(struct device *dev,
+- struct device_attribute *attr, char *buf)
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+ return sprintf(buf, "Not affected\n");
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ }
+
+-ssize_t cpu_show_spectre_v2(struct device *dev,
+- struct device_attribute *attr, char *buf)
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return sprintf(buf, "Not affected\n");
--- /dev/null
+From d37fc6d360a404b208547ba112e7dabb6533c7fc Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Mon, 12 Feb 2018 15:27:34 +0000
+Subject: x86/speculation: Correct Speculation Control microcode blacklist again
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit d37fc6d360a404b208547ba112e7dabb6533c7fc upstream.
+
+Arjan points out that the Intel document only clears the 0xc2 microcode
+on *some* parts with CPUID 506E3 (INTEL_FAM6_SKYLAKE_DESKTOP stepping 3).
+For the Skylake H/S platform it's OK, but for Skylake E3, which has
+the same CPUID, it isn't (yet) cleared.
+
+So removing it from the blacklist was premature. Put it back for now.
+
+Also, Arjan assures me that the 0x84 microcode for Kaby Lake, which
+featured in one of the early revisions of the Intel document, was
+never released to the public, and won't be until/unless it is also
+validated as safe. So those entries can change to 0x80, which is what
+all *other* versions of the doc have identified.
+
+Once the retrospective testing of existing public microcodes is done,
+we should be back in a mode where new microcodes are only released in
+batches, and we shouldn't even need to update the blacklist for those
+anyway. So this tweaking of the list isn't expected to keep happening.
+
+Requested-by: Arjan van de Ven <arjan.van.de.ven@intel.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: arjan.van.de.ven@intel.com
+Cc: dave.hansen@intel.com
+Cc: kvm@vger.kernel.org
+Cc: pbonzini@redhat.com
+Link: http://lkml.kernel.org/r/1518449255-2182-1-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -116,13 +116,14 @@ struct sku_microcode {
+ u32 microcode;
+ };
+ static const struct sku_microcode spectre_bad_microcodes[] = {
+- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
+- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
+- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
+- { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
+- { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
++ { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
--- /dev/null
+From 1751342095f0d2b36fa8114d8e12c5688c455ac4 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sat, 10 Feb 2018 23:39:22 +0000
+Subject: x86/speculation: Update Speculation Control microcode blacklist
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 1751342095f0d2b36fa8114d8e12c5688c455ac4 upstream.
+
+Intel have retroactively blessed the 0xc2 microcode on Skylake mobile
+and desktop parts, and the Gemini Lake 0x22 microcode is apparently fine
+too. We blacklisted the latter purely because it was present with all
+the other problematic ones in the 2018-01-08 release, but now it's
+explicitly listed as OK.
+
+We still list 0x84 for the various Kaby Lake / Coffee Lake parts, as
+that appeared in one version of the blacklist and then reverted to
+0x80 again. We can change it if 0x84 is actually announced to be safe.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: arjan.van.de.ven@intel.com
+Cc: jmattson@google.com
+Cc: karahmed@amazon.de
+Cc: kvm@vger.kernel.org
+Cc: pbonzini@redhat.com
+Cc: rkrcmar@redhat.com
+Cc: sironi@amazon.de
+Link: http://lkml.kernel.org/r/1518305967-31356-2-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -123,8 +123,6 @@ static const struct sku_microcode spectr
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
+- { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
+- { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
+@@ -136,8 +134,6 @@ static const struct sku_microcode spectr
+ { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
+ { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
+ { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
+- /* Updated in the 20180108 release; blacklist until we know otherwise */
+- { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
+ /* Observed in the wild */
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },