--- /dev/null
+From d49dbfade96d5b0863ca8a90122a805edd5ef50a Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Wed, 15 Aug 2018 08:14:37 -0500
+Subject: hwmon: (nct6775) Fix potential Spectre v1
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit d49dbfade96d5b0863ca8a90122a805edd5ef50a upstream.
+
+val can be indirectly controlled by user-space, hence leading to
+a potential exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+drivers/hwmon/nct6775.c:2698 store_pwm_weight_temp_sel() warn: potential
+spectre issue 'data->temp_src' [r]
+
+Fix this by sanitizing val before using it to index data->temp_src.
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/nct6775.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -63,6 +63,7 @@
+ #include <linux/bitops.h>
+ #include <linux/dmi.h>
+ #include <linux/io.h>
++#include <linux/nospec.h>
+ #include "lm75.h"
+
+ #define USE_ALTERNATE
+@@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device
+ return err;
+ if (val > NUM_TEMP)
+ return -EINVAL;
++ val = array_index_nospec(val, NUM_TEMP + 1);
+ if (val && (!(data->have_temp & BIT(val - 1)) ||
+ !data->temp_src[val - 1]))
+ return -EINVAL;
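
A minimal sketch of the pattern applied above, using hypothetical struct
and field names: array_index_nospec(index, size) returns index when
index < size and 0 otherwise, without a conditional branch the CPU
could speculate past, so the later array loads cannot read out of
bounds even under misprediction.

    #include <linux/nospec.h>

    /* Hypothetical handler following the same check-then-clamp shape. */
    static int select_temp_src(struct example_data *data, unsigned long val)
    {
            if (val > NUM_TEMP)             /* architectural bounds check */
                    return -EINVAL;
            /* Clamp val for speculation: the valid range is [0, NUM_TEMP]. */
            val = array_index_nospec(val, NUM_TEMP + 1);
            if (val && !data->temp_src[val - 1])
                    return -EINVAL;
            data->sel = val;
            return 0;
    }
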
soc-qcom-rmtfs-mem-fix-memleak-in-probe-error-paths.patch
pinctrl-freescale-off-by-one-in-imx1_pinconf_group_dbg_show.patch
scsi-qla2xxx-fix-stalled-relogin.patch
+x86-vdso-fix-lsl-operand-order.patch
+x86-nmi-fix-nmi-uaccess-race-against-cr3-switching.patch
+x86-irqflags-mark-native_restore_fl-extern-inline.patch
+x86-spectre-add-missing-family-6-check-to-microcode-check.patch
+x86-speculation-l1tf-increase-l1tf-memory-limit-for-nehalem.patch
+hwmon-nct6775-fix-potential-spectre-v1.patch
+x86-entry-64-wipe-kasan-stack-shadow-before-rewind_stack_do_exit.patch
+x86-allow-generating-user-space-headers-without-a-compiler.patch
x86-kvm-avoid-unused-variable-warning.patch
--- /dev/null
+From 829fe4aa9ac16417a904ad1de1307de906854bcf Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Wed, 29 Aug 2018 20:43:17 +0100
+Subject: x86: Allow generating user-space headers without a compiler
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+commit 829fe4aa9ac16417a904ad1de1307de906854bcf upstream.
+
+When bootstrapping an architecture, it's usual to generate the kernel's
+user-space headers (make headers_install) before building a compiler. Move
+the compiler check (for asm goto support) to the archprepare target so that
+it is only done when building code for the target.
+
+Fixes: e501ce957a78 ("x86: Force asm-goto")
+Reported-by: Helmut Grohne <helmutg@debian.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180829194317.GA4765@decadent.org.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/Makefile | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -180,10 +180,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ endif
+ endif
+
+-ifndef CC_HAVE_ASM_GOTO
+- $(error Compiler lacks asm-goto support.)
+-endif
+-
+ #
+ # Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
+ # GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
+@@ -317,6 +313,13 @@ PHONY += vdso_install
+ vdso_install:
+ $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
+
++archprepare: checkbin
++checkbin:
++ifndef CC_HAVE_ASM_GOTO
++ @echo Compiler lacks asm-goto support.
++ @exit 1
++endif
++
+ archclean:
+ $(Q)rm -rf $(objtree)/arch/i386
+ $(Q)rm -rf $(objtree)/arch/x86_64
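
For reference, the compiler feature CC_HAVE_ASM_GOTO probes is the
"asm goto" construct; a minimal illustrative sketch (not taken from the
kernel) is:

    /* asm goto lets inline assembly branch to a C label; compilers
     * without support reject the syntax outright, which is what the
     * (now archprepare-time) checkbin target detects. No outputs are
     * permitted in an asm goto statement.
     */
    static inline int is_nonzero(unsigned long word)
    {
            asm goto("test %0, %0\n\t"
                     "jnz %l[nonzero]"
                     : /* no outputs */ : "r" (word) : "cc" : nonzero);
            return 0;
    nonzero:
            return 1;
    }
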
--- /dev/null
+From f12d11c5c184626b4befdee3d573ec8237405a33 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Tue, 28 Aug 2018 20:40:33 +0200
+Subject: x86/entry/64: Wipe KASAN stack shadow before rewind_stack_do_exit()
+
+From: Jann Horn <jannh@google.com>
+
+commit f12d11c5c184626b4befdee3d573ec8237405a33 upstream.
+
+Reset the KASAN shadow state of the task stack before rewinding RSP.
+Without this, a kernel oops will leave parts of the stack poisoned, and
+code running under do_exit() can trip over such poisoned regions and cause
+nonsensical false-positive KASAN reports about stack-out-of-bounds bugs.
+
+This does not wipe the exception stacks; if an oops happens on an exception
+stack, it might result in random KASAN false-positives from other tasks
+afterwards. This is probably relatively uninteresting, since if the kernel
+oopses on an exception stack, there are most likely bigger things to worry
+about. It'd be more interesting if vmapped stacks and KASAN were
+compatible, since then handle_stack_overflow() would oops from exception
+stack context.
+
+Fixes: 2deb4be28077 ("x86/dumpstack: When OOPSing, rewind the stack before do_exit()")
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kasan-dev@googlegroups.com
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180828184033.93712-1-jannh@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/dumpstack.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -17,6 +17,7 @@
+ #include <linux/bug.h>
+ #include <linux/nmi.h>
+ #include <linux/sysfs.h>
++#include <linux/kasan.h>
+
+ #include <asm/cpu_entry_area.h>
+ #include <asm/stacktrace.h>
+@@ -356,7 +357,10 @@ void oops_end(unsigned long flags, struc
+ * We're not going to return, but we might be on an IST stack or
+ * have very little stack space left. Rewind the stack and kill
+ * the task.
++ * Before we rewind the stack, we have to tell KASAN that we're going to
++ * reuse the task stack and that existing poisons are invalid.
+ */
++ kasan_unpoison_task_stack(current);
+ rewind_stack_do_exit(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
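
The general rule the fix applies, sketched with a hypothetical wrapper:
code that abandons live stack frames without unwinding them must clear
the KASAN shadow for that region before the stack is reused.

    #include <linux/kasan.h>

    /* Hypothetical wrapper illustrating the ordering: wipe the stale
     * poison left by the abandoned frames, then reset RSP to the top
     * of the task stack and proceed to do_exit().
     */
    static void __noreturn restart_on_clean_stack(int signr)
    {
            kasan_unpoison_task_stack(current);     /* shadow first */
            rewind_stack_do_exit(signr);            /* then reuse the stack */
    }
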
--- /dev/null
+From 1f59a4581b5ecfe9b4f049a7a2cf904d8352842d Mon Sep 17 00:00:00 2001
+From: Nick Desaulniers <ndesaulniers@google.com>
+Date: Mon, 27 Aug 2018 14:40:09 -0700
+Subject: x86/irqflags: Mark native_restore_fl extern inline
+
+From: Nick Desaulniers <ndesaulniers@google.com>
+
+commit 1f59a4581b5ecfe9b4f049a7a2cf904d8352842d upstream.
+
+This should have been marked extern inline in order to pick up the out
+of line definition in arch/x86/kernel/irqflags.S.
+
+Fixes: 208cbb325589 ("x86/irqflags: Provide a declaration for native_save_fl")
+Reported-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180827214011.55428-1-ndesaulniers@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/irqflags.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -33,7 +33,8 @@ extern inline unsigned long native_save_
+ return flags;
+ }
+
+-static inline void native_restore_fl(unsigned long flags)
++extern inline void native_restore_fl(unsigned long flags);
++extern inline void native_restore_fl(unsigned long flags)
+ {
+ asm volatile("push %0 ; popf"
+ : /* no output */
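
The inlining rule at work, as a hedged two-file sketch with made-up
names: under the kernel's gnu_inline semantics, "extern inline"
supplies a body for inlining but emits no symbol, so the out-of-line
definition (here, arch/x86/kernel/irqflags.S) satisfies any call the
compiler leaves un-inlined. "static inline" would instead emit a local
copy per translation unit, conflicting with the assembly definition.

    /* header.h: body available for inlining, no symbol emitted. */
    extern inline int twice(int x);
    extern inline int twice(int x)
    {
            return 2 * x;
    }

    /* impl.c (standing in for the real definition in irqflags.S):
     * the single out-of-line symbol used whenever a call is not
     * inlined, e.g. under -O0 or when the function's address is taken.
     */
    int twice(int x)
    {
            return 2 * x;
    }
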
--- /dev/null
+From 4012e77a903d114f915fc607d6d2ed54a3d6c9b1 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Wed, 29 Aug 2018 08:47:18 -0700
+Subject: x86/nmi: Fix NMI uaccess race against CR3 switching
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 4012e77a903d114f915fc607d6d2ed54a3d6c9b1 upstream.
+
+An NMI can hit in the middle of context switching or in the middle of
+switch_mm_irqs_off(). In either case, CR3 might not match current->mm,
+which could cause copy_from_user_nmi() and friends to read the wrong
+memory.
+
+Fix it by adding a new nmi_uaccess_okay() helper and checking it in
+copy_from_user_nmi() and in __copy_from_user_nmi()'s callers.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Cc: Nadav Amit <nadav.amit@gmail.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Jann Horn <jannh@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/dd956eba16646fd0b15c3c0741269dfd84452dac.1535557289.git.luto@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/core.c | 2 +-
+ arch/x86/include/asm/tlbflush.h | 40 ++++++++++++++++++++++++++++++++++++++++
+ arch/x86/lib/usercopy.c | 5 +++++
+ arch/x86/mm/tlb.c | 7 +++++++
+ 4 files changed, 53 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchai
+
+ perf_callchain_store(entry, regs->ip);
+
+- if (!current->mm)
++ if (!nmi_uaccess_okay())
+ return;
+
+ if (perf_callchain_user32(regs, entry))
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -175,8 +175,16 @@ struct tlb_state {
+ * are on. This means that it may not match current->active_mm,
+ * which will contain the previous user mm when we're in lazy TLB
+ * mode even if we've already switched back to swapper_pg_dir.
++ *
++ * During switch_mm_irqs_off(), loaded_mm will be set to
++ * LOADED_MM_SWITCHING during the brief interrupts-off window
++ * when CR3 and loaded_mm would otherwise be inconsistent. This
++ * is for nmi_uaccess_okay()'s benefit.
+ */
+ struct mm_struct *loaded_mm;
++
++#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
++
+ u16 loaded_mm_asid;
+ u16 next_asid;
+ /* last user mm's ctx id */
+@@ -246,6 +254,38 @@ struct tlb_state {
+ };
+ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+
++/*
++ * Blindly accessing user memory from NMI context can be dangerous
++ * if we're in the middle of switching the current user task or
++ * switching the loaded mm. It can also be dangerous if we
++ * interrupted some kernel code that was temporarily using a
++ * different mm.
++ */
++static inline bool nmi_uaccess_okay(void)
++{
++ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
++ struct mm_struct *current_mm = current->mm;
++
++ VM_WARN_ON_ONCE(!loaded_mm);
++
++ /*
++ * The condition we want to check is
++ * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
++ * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
++ * is supposed to be reasonably fast.
++ *
++ * Instead, we check the almost equivalent but somewhat conservative
++ * condition below, and we rely on the fact that switch_mm_irqs_off()
++ * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
++ */
++ if (loaded_mm != current_mm)
++ return false;
++
++ VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
++
++ return true;
++}
++
+ /* Initialize cr4 shadow for this CPU. */
+ static inline void cr4_init_shadow(void)
+ {
+--- a/arch/x86/lib/usercopy.c
++++ b/arch/x86/lib/usercopy.c
+@@ -7,6 +7,8 @@
+ #include <linux/uaccess.h>
+ #include <linux/export.h>
+
++#include <asm/tlbflush.h>
++
+ /*
+ * We rely on the nested NMI work to allow atomic faults from the NMI path; the
+ * nested NMI paths are careful to preserve CR2.
+@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void
+ if (__range_not_ok(from, n, TASK_SIZE))
+ return n;
+
++ if (!nmi_uaccess_okay())
++ return n;
++
+ /*
+ * Even though this function is typically called from NMI/IRQ context
+ * disable pagefaults so that its behaviour is consistent even when
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -298,6 +298,10 @@ void switch_mm_irqs_off(struct mm_struct
+
+ choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+
++ /* Let nmi_uaccess_okay() know that we're changing CR3. */
++ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
++ barrier();
++
+ if (need_flush) {
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+@@ -328,6 +332,9 @@ void switch_mm_irqs_off(struct mm_struct
+ if (next != &init_mm)
+ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
++ /* Make sure we write CR3 before loaded_mm. */
++ barrier();
++
+ this_cpu_write(cpu_tlbstate.loaded_mm, next);
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+ }
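
A hedged usage sketch (hypothetical helper) of the new check from NMI
context: bail out when CR3 may not match current->mm, because a user
access would then walk the wrong page tables instead of faulting
cleanly.

    static unsigned long read_user_word_in_nmi(const void __user *p)
    {
            unsigned long word = 0;

            if (!nmi_uaccess_okay())        /* mid-switch: CR3 unreliable */
                    return 0;
            /* copy_from_user_nmi() returns the number of bytes NOT copied. */
            if (copy_from_user_nmi(&word, p, sizeof(word)))
                    return 0;
            return word;
    }
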
--- /dev/null
+From 1ab534e85c93945f7862378d8c8adcf408205b19 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 24 Aug 2018 10:03:51 -0700
+Subject: x86/spectre: Add missing family 6 check to microcode check
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 1ab534e85c93945f7862378d8c8adcf408205b19 upstream.
+
+The check for Spectre microcodes does not check for family 6, only the
+model numbers.
+
+Add a family 6 check to avoid ambiguity with other families.
+
+Fixes: a5b296636453 ("x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes")
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86@kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180824170351.34874-2-andi@firstfloor.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return false;
+
++ if (c->x86 != 6)
++ return false;
++
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_stepping == spectre_bad_microcodes[i].stepping)
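
Why the family qualifier matters, as a short sketch with a hypothetical
quirk table: model numbers are only meaningful within a family, so a
table keyed on model/stepping alone can match unrelated parts from
other families.

    /* Hypothetical model-keyed quirk lookup. Without the family test,
     * a non-family-6 CPU that happens to share a model/stepping pair
     * would match a table that only describes family-6 parts.
     */
    static bool has_quirk(struct cpuinfo_x86 *c)
    {
            int i;

            if (c->x86 != 6)
                    return false;
            for (i = 0; i < ARRAY_SIZE(quirk_table); i++)
                    if (c->x86_model == quirk_table[i].model &&
                        c->x86_stepping == quirk_table[i].stepping)
                            return true;
            return false;
    }
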
--- /dev/null
+From cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 24 Aug 2018 10:03:50 -0700
+Subject: x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 upstream.
+
+On Nehalem and newer core CPUs the CPU cache internally uses 44 bits of
+physical address space. The L1TF workaround is limited by this internal
+cache address width, and needs to have one bit free there for the
+mitigation to work.
+
+Older client systems report only 36bit physical address space so the range
+check decides that L1TF is not mitigated for a 36bit phys/32GB system with
+some memory holes.
+
+But since these actually have the larger internal cache width this warning
+is bogus because it would only really be needed if the system had more than
+43bits of memory.
+
+Add a new internal x86_cache_bits field. Normally it is the same as the
+physical bits field reported by CPUID, but for Nehalem and newer force it to
+be at least 44bits.
+
+Change the L1TF memory size warning to use the new cache_bits field to
+avoid bogus warnings and remove the bogus comment about memory size.
+
+Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf")
+Reported-by: George Anchev <studio@anchev.net>
+Reported-by: Christopher Snowhill <kode54@gmail.com>
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86@kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: Michael Hocko <mhocko@suse.com>
+Cc: vbabka@suse.cz
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/processor.h | 4 ++-
+ arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++++++++++++-----
+ arch/x86/kernel/cpu/common.c | 1
+ 3 files changed, 45 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
+ /* Index into per_cpu list: */
+ u16 cpu_index;
+ u32 microcode;
++ /* Address space bits used by the cache internally */
++ u8 x86_cache_bits;
+ unsigned initialized : 1;
+ } __randomize_layout;
+
+@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x8
+
+ static inline unsigned long long l1tf_pfn_limit(void)
+ {
+- return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
++ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+ }
+
+ extern void early_cpu_init(void);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+
++/*
++ * These CPUs all support 44bits physical address space internally in the
++ * cache but CPUID can report a smaller number of physical address bits.
++ *
++ * The L1TF mitigation uses the top most address bit for the inversion of
++ * non present PTEs. When the installed memory reaches into the top most
++ * address bit due to memory holes, which has been observed on machines
++ * which report 36bits physical address bits and have 32G RAM installed,
++ * then the mitigation range check in l1tf_select_mitigation() triggers.
++ * This is a false positive because the mitigation is still possible due to
++ * the fact that the cache uses 44bit internally. Use the cache bits
++ * instead of the reported physical bits and adjust them on the affected
++ * machines to 44bit if the reported bits are less than 44.
++ */
++static void override_cache_bits(struct cpuinfo_x86 *c)
++{
++ if (c->x86 != 6)
++ return;
++
++ switch (c->x86_model) {
++ case INTEL_FAM6_NEHALEM:
++ case INTEL_FAM6_WESTMERE:
++ case INTEL_FAM6_SANDYBRIDGE:
++ case INTEL_FAM6_IVYBRIDGE:
++ case INTEL_FAM6_HASWELL_CORE:
++ case INTEL_FAM6_HASWELL_ULT:
++ case INTEL_FAM6_HASWELL_GT3E:
++ case INTEL_FAM6_BROADWELL_CORE:
++ case INTEL_FAM6_BROADWELL_GT3E:
++ case INTEL_FAM6_SKYLAKE_MOBILE:
++ case INTEL_FAM6_SKYLAKE_DESKTOP:
++ case INTEL_FAM6_KABYLAKE_MOBILE:
++ case INTEL_FAM6_KABYLAKE_DESKTOP:
++ if (c->x86_cache_bits < 44)
++ c->x86_cache_bits = 44;
++ break;
++ }
++}
++
+ static void __init l1tf_select_mitigation(void)
+ {
+ u64 half_pa;
+@@ -659,6 +698,8 @@ static void __init l1tf_select_mitigatio
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
++ override_cache_bits(&boot_cpu_data);
++
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+@@ -678,11 +719,6 @@ static void __init l1tf_select_mitigatio
+ return;
+ #endif
+
+- /*
+- * This is extremely unlikely to happen because almost all
+- * systems have far more MAX_PA/2 than RAM can be fit into
+- * DIMM slots.
+- */
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinf
+ else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+ c->x86_phys_bits = 36;
+ #endif
++ c->x86_cache_bits = c->x86_phys_bits;
+ }
+
+ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
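
The arithmetic behind the false positive, as a small stand-alone
calculation (a user-space sketch of l1tf_pfn_limit() assuming
PAGE_SHIFT = 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Mirrors l1tf_pfn_limit(): the limit is BIT_ULL(bits - 1 - PAGE_SHIFT). */
    static unsigned long long pfn_limit(unsigned int bits)
    {
            return 1ULL << (bits - 1 - PAGE_SHIFT);
    }

    int main(void)
    {
            /* 36 reported physical bits: 2^23 pages = 32 GiB, so a
             * 32GB machine with memory holes mapping RAM at or above
             * that boundary trips the check -> bogus warning.
             */
            printf("36-bit limit: %llu GiB\n",
                   (pfn_limit(36) << PAGE_SHIFT) >> 30);    /* 32 */

            /* 44-bit internal cache width: 2^31 pages = 8 TiB, far
             * beyond what such systems can install.
             */
            printf("44-bit limit: %llu GiB\n",
                   (pfn_limit(44) << PAGE_SHIFT) >> 30);    /* 8192 */
            return 0;
    }
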
--- /dev/null
+From e78e5a91456fcecaa2efbb3706572fe043766f4d Mon Sep 17 00:00:00 2001
+From: Samuel Neves <sneves@dei.uc.pt>
+Date: Sat, 1 Sep 2018 21:14:52 +0100
+Subject: x86/vdso: Fix lsl operand order
+
+From: Samuel Neves <sneves@dei.uc.pt>
+
+commit e78e5a91456fcecaa2efbb3706572fe043766f4d upstream.
+
+In the __getcpu function, lsl has its source and destination operands in
+the wrong order. Luckily, the compiler tends to choose %eax for both variables,
+so it has been working so far.
+
+Fixes: a582c540ac1b ("x86/vdso: Use RDPID in preference to LSL when available")
+Signed-off-by: Samuel Neves <sneves@dei.uc.pt>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Andy Lutomirski <luto@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180901201452.27828-1-sneves@dei.uc.pt
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/vgtod.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/vgtod.h
++++ b/arch/x86/include/asm/vgtod.h
+@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void
+ *
+ * If RDPID is available, use it.
+ */
+- alternative_io ("lsl %[p],%[seg]",
++ alternative_io ("lsl %[seg],%[p]",
+ ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+ X86_FEATURE_RDPID,
+ [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));