From: Greg Kroah-Hartman Date: Mon, 3 Sep 2018 13:06:22 +0000 (+0200) Subject: 4.18-stable patches X-Git-Tag: v3.18.121~14 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=5eb24753af68b4d5184a1fef4de493d5147966f0;p=thirdparty%2Fkernel%2Fstable-queue.git 4.18-stable patches added patches: hwmon-nct6775-fix-potential-spectre-v1.patch x86-allow-generating-user-space-headers-without-a-compiler.patch x86-entry-64-wipe-kasan-stack-shadow-before-rewind_stack_do_exit.patch x86-irqflags-mark-native_restore_fl-extern-inline.patch x86-nmi-fix-nmi-uaccess-race-against-cr3-switching.patch x86-spectre-add-missing-family-6-check-to-microcode-check.patch x86-speculation-l1tf-increase-l1tf-memory-limit-for-nehalem.patch x86-vdso-fix-lsl-operand-order.patch --- diff --git a/queue-4.18/hwmon-nct6775-fix-potential-spectre-v1.patch b/queue-4.18/hwmon-nct6775-fix-potential-spectre-v1.patch new file mode 100644 index 00000000000..5a4fffe57c9 --- /dev/null +++ b/queue-4.18/hwmon-nct6775-fix-potential-spectre-v1.patch @@ -0,0 +1,52 @@ +From d49dbfade96d5b0863ca8a90122a805edd5ef50a Mon Sep 17 00:00:00 2001 +From: "Gustavo A. R. Silva" +Date: Wed, 15 Aug 2018 08:14:37 -0500 +Subject: hwmon: (nct6775) Fix potential Spectre v1 + +From: Gustavo A. R. Silva + +commit d49dbfade96d5b0863ca8a90122a805edd5ef50a upstream. + +val can be indirectly controlled by user-space, hence leading to +a potential exploitation of the Spectre variant 1 vulnerability. + +This issue was detected with the help of Smatch: + +vers/hwmon/nct6775.c:2698 store_pwm_weight_temp_sel() warn: potential +spectre issue 'data->temp_src' [r] + +Fix this by sanitizing val before using it to index data->temp_src + +Notice that given that speculation windows are large, the policy is +to kill the speculation on the first load and not worry if it can be +completed with a dependent load/store [1]. + +[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2 + +Cc: stable@vger.kernel.org +Signed-off-by: Gustavo A. R. 
Silva +Signed-off-by: Guenter Roeck +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/hwmon/nct6775.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/hwmon/nct6775.c ++++ b/drivers/hwmon/nct6775.c +@@ -63,6 +63,7 @@ + #include + #include + #include ++#include + #include "lm75.h" + + #define USE_ALTERNATE +@@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device + return err; + if (val > NUM_TEMP) + return -EINVAL; ++ val = array_index_nospec(val, NUM_TEMP + 1); + if (val && (!(data->have_temp & BIT(val - 1)) || + !data->temp_src[val - 1])) + return -EINVAL; diff --git a/queue-4.18/series b/queue-4.18/series index d613cc93069..8915b8da3d2 100644 --- a/queue-4.18/series +++ b/queue-4.18/series @@ -76,4 +76,12 @@ asoc-wm_adsp-correct-dsp-pointer-for-preloader-control.patch soc-qcom-rmtfs-mem-fix-memleak-in-probe-error-paths.patch pinctrl-freescale-off-by-one-in-imx1_pinconf_group_dbg_show.patch scsi-qla2xxx-fix-stalled-relogin.patch +x86-vdso-fix-lsl-operand-order.patch +x86-nmi-fix-nmi-uaccess-race-against-cr3-switching.patch +x86-irqflags-mark-native_restore_fl-extern-inline.patch +x86-spectre-add-missing-family-6-check-to-microcode-check.patch +x86-speculation-l1tf-increase-l1tf-memory-limit-for-nehalem.patch +hwmon-nct6775-fix-potential-spectre-v1.patch +x86-entry-64-wipe-kasan-stack-shadow-before-rewind_stack_do_exit.patch +x86-allow-generating-user-space-headers-without-a-compiler.patch x86-kvm-avoid-unused-variable-warning.patch diff --git a/queue-4.18/x86-allow-generating-user-space-headers-without-a-compiler.patch b/queue-4.18/x86-allow-generating-user-space-headers-without-a-compiler.patch new file mode 100644 index 00000000000..9f5e105f325 --- /dev/null +++ b/queue-4.18/x86-allow-generating-user-space-headers-without-a-compiler.patch @@ -0,0 +1,53 @@ +From 829fe4aa9ac16417a904ad1de1307de906854bcf Mon Sep 17 00:00:00 2001 +From: Ben Hutchings +Date: Wed, 29 Aug 2018 20:43:17 +0100 +Subject: x86: Allow generating user-space headers without a compiler + +From: Ben Hutchings + +commit 829fe4aa9ac16417a904ad1de1307de906854bcf upstream. + +When bootstrapping an architecture, it's usual to generate the kernel's +user-space headers (make headers_install) before building a compiler. Move +the compiler check (for asm goto support) to the archprepare target so that +it is only done when building code for the target. + +Fixes: e501ce957a78 ("x86: Force asm-goto") +Reported-by: Helmut Grohne +Signed-off-by: Ben Hutchings +Signed-off-by: Thomas Gleixner +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/20180829194317.GA4765@decadent.org.uk +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/Makefile | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -180,10 +180,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER + endif + endif + +-ifndef CC_HAVE_ASM_GOTO +- $(error Compiler lacks asm-goto support.) +-endif +- + # + # Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a + # GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way +@@ -317,6 +313,13 @@ PHONY += vdso_install + vdso_install: + $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ + ++archprepare: checkbin ++checkbin: ++ifndef CC_HAVE_ASM_GOTO ++ @echo Compiler lacks asm-goto support. 
++ @exit 1 ++endif ++ + archclean: + $(Q)rm -rf $(objtree)/arch/i386 + $(Q)rm -rf $(objtree)/arch/x86_64 diff --git a/queue-4.18/x86-entry-64-wipe-kasan-stack-shadow-before-rewind_stack_do_exit.patch b/queue-4.18/x86-entry-64-wipe-kasan-stack-shadow-before-rewind_stack_do_exit.patch new file mode 100644 index 00000000000..c4066af6f43 --- /dev/null +++ b/queue-4.18/x86-entry-64-wipe-kasan-stack-shadow-before-rewind_stack_do_exit.patch @@ -0,0 +1,60 @@ +From f12d11c5c184626b4befdee3d573ec8237405a33 Mon Sep 17 00:00:00 2001 +From: Jann Horn +Date: Tue, 28 Aug 2018 20:40:33 +0200 +Subject: x86/entry/64: Wipe KASAN stack shadow before rewind_stack_do_exit() + +From: Jann Horn + +commit f12d11c5c184626b4befdee3d573ec8237405a33 upstream. + +Reset the KASAN shadow state of the task stack before rewinding RSP. +Without this, a kernel oops will leave parts of the stack poisoned, and +code running under do_exit() can trip over such poisoned regions and cause +nonsensical false-positive KASAN reports about stack-out-of-bounds bugs. + +This does not wipe the exception stacks; if an oops happens on an exception +stack, it might result in random KASAN false-positives from other tasks +afterwards. This is probably relatively uninteresting, since if the kernel +oopses on an exception stack, there are most likely bigger things to worry +about. It'd be more interesting if vmapped stacks and KASAN were +compatible, since then handle_stack_overflow() would oops from exception +stack context. + +Fixes: 2deb4be28077 ("x86/dumpstack: When OOPSing, rewind the stack before do_exit()") +Signed-off-by: Jann Horn +Signed-off-by: Thomas Gleixner +Acked-by: Andrey Ryabinin +Cc: Andy Lutomirski +Cc: Dmitry Vyukov +Cc: Alexander Potapenko +Cc: Kees Cook +Cc: kasan-dev@googlegroups.com +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/20180828184033.93712-1-jannh@google.com +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kernel/dumpstack.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/arch/x86/kernel/dumpstack.c ++++ b/arch/x86/kernel/dumpstack.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -356,7 +357,10 @@ void oops_end(unsigned long flags, struc + * We're not going to return, but we might be on an IST stack or + * have very little stack space left. Rewind the stack and kill + * the task. ++ * Before we rewind the stack, we have to tell KASAN that we're going to ++ * reuse the task stack and that existing poisons are invalid. + */ ++ kasan_unpoison_task_stack(current); + rewind_stack_do_exit(signr); + } + NOKPROBE_SYMBOL(oops_end); diff --git a/queue-4.18/x86-irqflags-mark-native_restore_fl-extern-inline.patch b/queue-4.18/x86-irqflags-mark-native_restore_fl-extern-inline.patch new file mode 100644 index 00000000000..1f4f800e9e8 --- /dev/null +++ b/queue-4.18/x86-irqflags-mark-native_restore_fl-extern-inline.patch @@ -0,0 +1,40 @@ +From 1f59a4581b5ecfe9b4f049a7a2cf904d8352842d Mon Sep 17 00:00:00 2001 +From: Nick Desaulniers +Date: Mon, 27 Aug 2018 14:40:09 -0700 +Subject: x86/irqflags: Mark native_restore_fl extern inline + +From: Nick Desaulniers + +commit 1f59a4581b5ecfe9b4f049a7a2cf904d8352842d upstream. + +This should have been marked extern inline in order to pick up the out +of line definition in arch/x86/kernel/irqflags.S. + +Fixes: 208cbb325589 ("x86/irqflags: Provide a declaration for native_save_fl") +Reported-by: Ben Hutchings +Signed-off-by: Nick Desaulniers +Signed-off-by: Thomas Gleixner +Reviewed-by: Juergen Gross +Cc: "H. 
Peter Anvin" +Cc: Boris Ostrovsky +Cc: Greg Kroah-Hartman +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/20180827214011.55428-1-ndesaulniers@google.com +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/include/asm/irqflags.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -33,7 +33,8 @@ extern inline unsigned long native_save_ + return flags; + } + +-static inline void native_restore_fl(unsigned long flags) ++extern inline void native_restore_fl(unsigned long flags); ++extern inline void native_restore_fl(unsigned long flags) + { + asm volatile("push %0 ; popf" + : /* no output */ diff --git a/queue-4.18/x86-nmi-fix-nmi-uaccess-race-against-cr3-switching.patch b/queue-4.18/x86-nmi-fix-nmi-uaccess-race-against-cr3-switching.patch new file mode 100644 index 00000000000..9da2a67ee93 --- /dev/null +++ b/queue-4.18/x86-nmi-fix-nmi-uaccess-race-against-cr3-switching.patch @@ -0,0 +1,148 @@ +From 4012e77a903d114f915fc607d6d2ed54a3d6c9b1 Mon Sep 17 00:00:00 2001 +From: Andy Lutomirski +Date: Wed, 29 Aug 2018 08:47:18 -0700 +Subject: x86/nmi: Fix NMI uaccess race against CR3 switching + +From: Andy Lutomirski + +commit 4012e77a903d114f915fc607d6d2ed54a3d6c9b1 upstream. + +A NMI can hit in the middle of context switching or in the middle of +switch_mm_irqs_off(). In either case, CR3 might not match current->mm, +which could cause copy_from_user_nmi() and friends to read the wrong +memory. + +Fix it by adding a new nmi_uaccess_okay() helper and checking it in +copy_from_user_nmi() and in __copy_from_user_nmi()'s callers. + +Signed-off-by: Andy Lutomirski +Signed-off-by: Thomas Gleixner +Reviewed-by: Rik van Riel +Cc: Nadav Amit +Cc: Borislav Petkov +Cc: Jann Horn +Cc: Peter Zijlstra +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/dd956eba16646fd0b15c3c0741269dfd84452dac.1535557289.git.luto@kernel.org +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/events/core.c | 2 +- + arch/x86/include/asm/tlbflush.h | 40 ++++++++++++++++++++++++++++++++++++++++ + arch/x86/lib/usercopy.c | 5 +++++ + arch/x86/mm/tlb.c | 7 +++++++ + 4 files changed, 53 insertions(+), 1 deletion(-) + +--- a/arch/x86/events/core.c ++++ b/arch/x86/events/core.c +@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchai + + perf_callchain_store(entry, regs->ip); + +- if (!current->mm) ++ if (!nmi_uaccess_okay()) + return; + + if (perf_callchain_user32(regs, entry)) +--- a/arch/x86/include/asm/tlbflush.h ++++ b/arch/x86/include/asm/tlbflush.h +@@ -175,8 +175,16 @@ struct tlb_state { + * are on. This means that it may not match current->active_mm, + * which will contain the previous user mm when we're in lazy TLB + * mode even if we've already switched back to swapper_pg_dir. ++ * ++ * During switch_mm_irqs_off(), loaded_mm will be set to ++ * LOADED_MM_SWITCHING during the brief interrupts-off window ++ * when CR3 and loaded_mm would otherwise be inconsistent. This ++ * is for nmi_uaccess_okay()'s benefit. + */ + struct mm_struct *loaded_mm; ++ ++#define LOADED_MM_SWITCHING ((struct mm_struct *)1) ++ + u16 loaded_mm_asid; + u16 next_asid; + /* last user mm's ctx id */ +@@ -246,6 +254,38 @@ struct tlb_state { + }; + DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); + ++/* ++ * Blindly accessing user memory from NMI context can be dangerous ++ * if we're in the middle of switching the current user task or ++ * switching the loaded mm. 
It can also be dangerous if we ++ * interrupted some kernel code that was temporarily using a ++ * different mm. ++ */ ++static inline bool nmi_uaccess_okay(void) ++{ ++ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); ++ struct mm_struct *current_mm = current->mm; ++ ++ VM_WARN_ON_ONCE(!loaded_mm); ++ ++ /* ++ * The condition we want to check is ++ * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, ++ * if we're running in a VM with shadow paging, and nmi_uaccess_okay() ++ * is supposed to be reasonably fast. ++ * ++ * Instead, we check the almost equivalent but somewhat conservative ++ * condition below, and we rely on the fact that switch_mm_irqs_off() ++ * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3. ++ */ ++ if (loaded_mm != current_mm) ++ return false; ++ ++ VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa())); ++ ++ return true; ++} ++ + /* Initialize cr4 shadow for this CPU. */ + static inline void cr4_init_shadow(void) + { +--- a/arch/x86/lib/usercopy.c ++++ b/arch/x86/lib/usercopy.c +@@ -7,6 +7,8 @@ + #include + #include + ++#include ++ + /* + * We rely on the nested NMI work to allow atomic faults from the NMI path; the + * nested NMI paths are careful to preserve CR2. +@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void + if (__range_not_ok(from, n, TASK_SIZE)) + return n; + ++ if (!nmi_uaccess_okay()) ++ return n; ++ + /* + * Even though this function is typically called from NMI/IRQ context + * disable pagefaults so that its behaviour is consistent even when +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -298,6 +298,10 @@ void switch_mm_irqs_off(struct mm_struct + + choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + ++ /* Let nmi_uaccess_okay() know that we're changing CR3. */ ++ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); ++ barrier(); ++ + if (need_flush) { + this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); + this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); +@@ -328,6 +332,9 @@ void switch_mm_irqs_off(struct mm_struct + if (next != &init_mm) + this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); + ++ /* Make sure we write CR3 before loaded_mm. */ ++ barrier(); ++ + this_cpu_write(cpu_tlbstate.loaded_mm, next); + this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); + } diff --git a/queue-4.18/x86-spectre-add-missing-family-6-check-to-microcode-check.patch b/queue-4.18/x86-spectre-add-missing-family-6-check-to-microcode-check.patch new file mode 100644 index 00000000000..e5d28f36fe9 --- /dev/null +++ b/queue-4.18/x86-spectre-add-missing-family-6-check-to-microcode-check.patch @@ -0,0 +1,39 @@ +From 1ab534e85c93945f7862378d8c8adcf408205b19 Mon Sep 17 00:00:00 2001 +From: Andi Kleen +Date: Fri, 24 Aug 2018 10:03:51 -0700 +Subject: x86/spectre: Add missing family 6 check to microcode check + +From: Andi Kleen + +commit 1ab534e85c93945f7862378d8c8adcf408205b19 upstream. + +The check for Spectre microcodes does not check for family 6, only the +model numbers. + +Add a family 6 check to avoid ambiguity with other families. 
+ +Fixes: a5b296636453 ("x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes") +Signed-off-by: Andi Kleen +Signed-off-by: Thomas Gleixner +Cc: x86@kernel.org +Cc: linux-kernel@vger.kernel.org +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/20180824170351.34874-2-andi@firstfloor.org +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kernel/cpu/intel.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) + return false; + ++ if (c->x86 != 6) ++ return false; ++ + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { + if (c->x86_model == spectre_bad_microcodes[i].model && + c->x86_stepping == spectre_bad_microcodes[i].stepping) diff --git a/queue-4.18/x86-speculation-l1tf-increase-l1tf-memory-limit-for-nehalem.patch b/queue-4.18/x86-speculation-l1tf-increase-l1tf-memory-limit-for-nehalem.patch new file mode 100644 index 00000000000..8b4994d950f --- /dev/null +++ b/queue-4.18/x86-speculation-l1tf-increase-l1tf-memory-limit-for-nehalem.patch @@ -0,0 +1,147 @@ +From cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 Mon Sep 17 00:00:00 2001 +From: Andi Kleen +Date: Fri, 24 Aug 2018 10:03:50 -0700 +Subject: x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+ + +From: Andi Kleen + +commit cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 upstream. + +On Nehalem and newer core CPUs the CPU cache internally uses 44 bits +physical address space. The L1TF workaround is limited by this internal +cache address width, and needs to have one bit free there for the +mitigation to work. + +Older client systems report only 36bit physical address space so the range +check decides that L1TF is not mitigated for a 36bit phys/32GB system with +some memory holes. + +But since these actually have the larger internal cache width this warning +is bogus because it would only really be needed if the system had more than +43bits of memory. + +Add a new internal x86_cache_bits field. Normally it is the same as the +physical bits field reported by CPUID, but for Nehalem and newerforce it to +be at least 44bits. + +Change the L1TF memory size warning to use the new cache_bits field to +avoid bogus warnings and remove the bogus comment about memory size. 
+ +Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf") +Reported-by: George Anchev +Reported-by: Christopher Snowhill +Signed-off-by: Andi Kleen +Signed-off-by: Thomas Gleixner +Cc: x86@kernel.org +Cc: linux-kernel@vger.kernel.org +Cc: Michael Hocko +Cc: vbabka@suse.cz +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/include/asm/processor.h | 4 ++- + arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++++++++++++----- + arch/x86/kernel/cpu/common.c | 1 + 3 files changed, 45 insertions(+), 6 deletions(-) + +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -132,6 +132,8 @@ struct cpuinfo_x86 { + /* Index into per_cpu list: */ + u16 cpu_index; + u32 microcode; ++ /* Address space bits used by the cache internally */ ++ u8 x86_cache_bits; + unsigned initialized : 1; + } __randomize_layout; + +@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x8 + + static inline unsigned long long l1tf_pfn_limit(void) + { +- return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT); ++ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); + } + + extern void early_cpu_init(void); +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); + enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; + EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); + ++/* ++ * These CPUs all support 44bits physical address space internally in the ++ * cache but CPUID can report a smaller number of physical address bits. ++ * ++ * The L1TF mitigation uses the top most address bit for the inversion of ++ * non present PTEs. When the installed memory reaches into the top most ++ * address bit due to memory holes, which has been observed on machines ++ * which report 36bits physical address bits and have 32G RAM installed, ++ * then the mitigation range check in l1tf_select_mitigation() triggers. ++ * This is a false positive because the mitigation is still possible due to ++ * the fact that the cache uses 44bit internally. Use the cache bits ++ * instead of the reported physical bits and adjust them on the affected ++ * machines to 44bit if the reported bits are less than 44. ++ */ ++static void override_cache_bits(struct cpuinfo_x86 *c) ++{ ++ if (c->x86 != 6) ++ return; ++ ++ switch (c->x86_model) { ++ case INTEL_FAM6_NEHALEM: ++ case INTEL_FAM6_WESTMERE: ++ case INTEL_FAM6_SANDYBRIDGE: ++ case INTEL_FAM6_IVYBRIDGE: ++ case INTEL_FAM6_HASWELL_CORE: ++ case INTEL_FAM6_HASWELL_ULT: ++ case INTEL_FAM6_HASWELL_GT3E: ++ case INTEL_FAM6_BROADWELL_CORE: ++ case INTEL_FAM6_BROADWELL_GT3E: ++ case INTEL_FAM6_SKYLAKE_MOBILE: ++ case INTEL_FAM6_SKYLAKE_DESKTOP: ++ case INTEL_FAM6_KABYLAKE_MOBILE: ++ case INTEL_FAM6_KABYLAKE_DESKTOP: ++ if (c->x86_cache_bits < 44) ++ c->x86_cache_bits = 44; ++ break; ++ } ++} ++ + static void __init l1tf_select_mitigation(void) + { + u64 half_pa; +@@ -659,6 +698,8 @@ static void __init l1tf_select_mitigatio + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return; + ++ override_cache_bits(&boot_cpu_data); ++ + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + case L1TF_MITIGATION_FLUSH_NOWARN: +@@ -678,11 +719,6 @@ static void __init l1tf_select_mitigatio + return; + #endif + +- /* +- * This is extremely unlikely to happen because almost all +- * systems have far more MAX_PA/2 than RAM can be fit into +- * DIMM slots. 
+- */ + half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; + if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { + pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinf + else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) + c->x86_phys_bits = 36; + #endif ++ c->x86_cache_bits = c->x86_phys_bits; + } + + static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) diff --git a/queue-4.18/x86-vdso-fix-lsl-operand-order.patch b/queue-4.18/x86-vdso-fix-lsl-operand-order.patch new file mode 100644 index 00000000000..91d24c8828c --- /dev/null +++ b/queue-4.18/x86-vdso-fix-lsl-operand-order.patch @@ -0,0 +1,36 @@ +From e78e5a91456fcecaa2efbb3706572fe043766f4d Mon Sep 17 00:00:00 2001 +From: Samuel Neves +Date: Sat, 1 Sep 2018 21:14:52 +0100 +Subject: x86/vdso: Fix lsl operand order + +From: Samuel Neves + +commit e78e5a91456fcecaa2efbb3706572fe043766f4d upstream. + +In the __getcpu function, lsl is using the wrong target and destination +registers. Luckily, the compiler tends to choose %eax for both variables, +so it has been working so far. + +Fixes: a582c540ac1b ("x86/vdso: Use RDPID in preference to LSL when available") +Signed-off-by: Samuel Neves +Signed-off-by: Thomas Gleixner +Acked-by: Andy Lutomirski +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/20180901201452.27828-1-sneves@dei.uc.pt +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/include/asm/vgtod.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/x86/include/asm/vgtod.h ++++ b/arch/x86/include/asm/vgtod.h +@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void + * + * If RDPID is available, use it. + */ +- alternative_io ("lsl %[p],%[seg]", ++ alternative_io ("lsl %[seg],%[p]", + ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ + X86_FEATURE_RDPID, + [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
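
For readers following the nct6775 Spectre-v1 fix queued above, the sketch below shows the branchless index-clamping idea behind array_index_nospec() as a self-contained userspace C program. It is an illustration only: the helper names (index_mask_nospec, index_nospec) and the demo bound of 17 are stand-ins rather than the kernel's include/linux/nospec.h implementation, and the mask relies on arithmetic right shift of a negative value, which gcc and clang provide.

/*
 * Illustration only: a branchless clamp in the spirit of
 * array_index_nospec().  Helper names and the demo bound are
 * made up for this sketch.
 */
#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* All-ones when index < size, zero otherwise, with no conditional branch. */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

/* Clamp index so that any value >= size collapses to 0. */
static unsigned long index_nospec(unsigned long index, unsigned long size)
{
	return index & index_mask_nospec(index, size);
}

int main(void)
{
	unsigned long size = 17;	/* stand-in for NUM_TEMP + 1; the real value differs */
	unsigned long vals[] = { 0, 5, 16, 17, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("val=%lu -> clamped=%lu\n", vals[i],
		       index_nospec(vals[i], size));
	return 0;
}

The queued patch applies the same idea by calling array_index_nospec(val, NUM_TEMP + 1) after the explicit range check, so that even a mis-speculated out-of-range val cannot steer the dependent data->temp_src load to attacker-chosen memory.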