asoc-dpcm-don-t-merge-format-from-invalid-codec-dai.patch
asoc-sirf-fix-potential-null-pointer-dereference.patch
pinctrl-freescale-off-by-one-in-imx1_pinconf_group_dbg_show.patch
+x86-irqflags-mark-native_restore_fl-extern-inline.patch
+x86-spectre-add-missing-family-6-check-to-microcode-check.patch
+x86-speculation-l1tf-increase-l1tf-memory-limit-for-nehalem.patch
+x86-entry-64-wipe-kasan-stack-shadow-before-rewind_stack_do_exit.patch
--- /dev/null
+From f12d11c5c184626b4befdee3d573ec8237405a33 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Tue, 28 Aug 2018 20:40:33 +0200
+Subject: x86/entry/64: Wipe KASAN stack shadow before rewind_stack_do_exit()
+
+From: Jann Horn <jannh@google.com>
+
+commit f12d11c5c184626b4befdee3d573ec8237405a33 upstream.
+
+Reset the KASAN shadow state of the task stack before rewinding RSP.
+Without this, a kernel oops will leave parts of the stack poisoned, and
+code running under do_exit() can trip over such poisoned regions and cause
+nonsensical false-positive KASAN reports about stack-out-of-bounds bugs.
+
+This does not wipe the exception stacks; if an oops happens on an exception
+stack, it might result in random KASAN false-positives from other tasks
+afterwards. This is probably relatively uninteresting, since if the kernel
+oopses on an exception stack, there are most likely bigger things to worry
+about. It'd be more interesting if vmapped stacks and KASAN were
+compatible, since then handle_stack_overflow() would oops from exception
+stack context.
+
+Fixes: 2deb4be28077 ("x86/dumpstack: When OOPSing, rewind the stack before do_exit()")
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kasan-dev@googlegroups.com
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180828184033.93712-1-jannh@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/dumpstack.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -15,6 +15,7 @@
+ #include <linux/bug.h>
+ #include <linux/nmi.h>
+ #include <linux/sysfs.h>
++#include <linux/kasan.h>
+
+ #include <asm/stacktrace.h>
+ #include <asm/unwind.h>
+@@ -229,7 +230,10 @@ void oops_end(unsigned long flags, struc
+ * We're not going to return, but we might be on an IST stack or
+ * have very little stack space left. Rewind the stack and kill
+ * the task.
++ * Before we rewind the stack, we have to tell KASAN that we're going to
++ * reuse the task stack and that existing poisons are invalid.
+ */
++ kasan_unpoison_task_stack(current);
+ rewind_stack_do_exit(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
--- /dev/null
+From 1f59a4581b5ecfe9b4f049a7a2cf904d8352842d Mon Sep 17 00:00:00 2001
+From: Nick Desaulniers <ndesaulniers@google.com>
+Date: Mon, 27 Aug 2018 14:40:09 -0700
+Subject: x86/irqflags: Mark native_restore_fl extern inline
+
+From: Nick Desaulniers <ndesaulniers@google.com>
+
+commit 1f59a4581b5ecfe9b4f049a7a2cf904d8352842d upstream.
+
+This should have been marked extern inline in order to pick up the out
+of line definition in arch/x86/kernel/irqflags.S.
+
+Fixes: 208cbb325589 ("x86/irqflags: Provide a declaration for native_save_fl")
+Reported-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180827214011.55428-1-ndesaulniers@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/irqflags.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -32,7 +32,8 @@ extern inline unsigned long native_save_
+ return flags;
+ }
+
+-static inline void native_restore_fl(unsigned long flags)
++extern inline void native_restore_fl(unsigned long flags);
++extern inline void native_restore_fl(unsigned long flags)
+ {
+ asm volatile("push %0 ; popf"
+ : /* no output */
--- /dev/null
+From 1ab534e85c93945f7862378d8c8adcf408205b19 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 24 Aug 2018 10:03:51 -0700
+Subject: x86/spectre: Add missing family 6 check to microcode check
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit 1ab534e85c93945f7862378d8c8adcf408205b19 upstream.
+
+The check for Spectre microcodes does not check for family 6, only the
+model numbers.
+
+Add a family 6 check to avoid ambiguity with other families.
+
+Fixes: a5b296636453 ("x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes")
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86@kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180824170351.34874-2-andi@firstfloor.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -109,6 +109,9 @@ static bool bad_spectre_microcode(struct
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return false;
+
++ if (c->x86 != 6)
++ return false;
++
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_stepping == spectre_bad_microcodes[i].stepping)
--- /dev/null
+From cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 24 Aug 2018 10:03:50 -0700
+Subject: x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+
+
+From: Andi Kleen <ak@linux.intel.com>
+
+commit cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 upstream.
+
+On Nehalem and newer core CPUs the CPU cache internally uses 44 bits
+physical address space. The L1TF workaround is limited by this internal
+cache address width, and needs to have one bit free there for the
+mitigation to work.
+
+Older client systems report only 36bit physical address space so the range
+check decides that L1TF is not mitigated for a 36bit phys/32GB system with
+some memory holes.
+
+But since these actually have the larger internal cache width this warning
+is bogus because it would only really be needed if the system had more than
+43bits of memory.
+
+Add a new internal x86_cache_bits field. Normally it is the same as the
+physical bits field reported by CPUID, but for Nehalem and newer force it to
+be at least 44bits.
+
+Change the L1TF memory size warning to use the new cache_bits field to
+avoid bogus warnings and remove the bogus comment about memory size.
+
+Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf")
+Reported-by: George Anchev <studio@anchev.net>
+Reported-by: Christopher Snowhill <kode54@gmail.com>
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86@kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: Michael Hocko <mhocko@suse.com>
+Cc: vbabka@suse.cz
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/processor.h | 4 ++-
+ arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++++++++++++-----
+ arch/x86/kernel/cpu/common.c | 1
+ 3 files changed, 45 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -136,6 +136,8 @@ struct cpuinfo_x86 {
+ /* Index into per_cpu list: */
+ u16 cpu_index;
+ u32 microcode;
++ /* Address space bits used by the cache internally */
++ u8 x86_cache_bits;
+ };
+
+ #define X86_VENDOR_INTEL 0
+@@ -175,7 +177,7 @@ extern void cpu_detect(struct cpuinfo_x8
+
+ static inline unsigned long long l1tf_pfn_limit(void)
+ {
+- return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
++ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+ }
+
+ extern void early_cpu_init(void);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -651,6 +651,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+
++/*
++ * These CPUs all support 44bits physical address space internally in the
++ * cache but CPUID can report a smaller number of physical address bits.
++ *
++ * The L1TF mitigation uses the top most address bit for the inversion of
++ * non present PTEs. When the installed memory reaches into the top most
++ * address bit due to memory holes, which has been observed on machines
++ * which report 36bits physical address bits and have 32G RAM installed,
++ * then the mitigation range check in l1tf_select_mitigation() triggers.
++ * This is a false positive because the mitigation is still possible due to
++ * the fact that the cache uses 44bit internally. Use the cache bits
++ * instead of the reported physical bits and adjust them on the affected
++ * machines to 44bit if the reported bits are less than 44.
++ */
++static void override_cache_bits(struct cpuinfo_x86 *c)
++{
++ if (c->x86 != 6)
++ return;
++
++ switch (c->x86_model) {
++ case INTEL_FAM6_NEHALEM:
++ case INTEL_FAM6_WESTMERE:
++ case INTEL_FAM6_SANDYBRIDGE:
++ case INTEL_FAM6_IVYBRIDGE:
++ case INTEL_FAM6_HASWELL_CORE:
++ case INTEL_FAM6_HASWELL_ULT:
++ case INTEL_FAM6_HASWELL_GT3E:
++ case INTEL_FAM6_BROADWELL_CORE:
++ case INTEL_FAM6_BROADWELL_GT3E:
++ case INTEL_FAM6_SKYLAKE_MOBILE:
++ case INTEL_FAM6_SKYLAKE_DESKTOP:
++ case INTEL_FAM6_KABYLAKE_MOBILE:
++ case INTEL_FAM6_KABYLAKE_DESKTOP:
++ if (c->x86_cache_bits < 44)
++ c->x86_cache_bits = 44;
++ break;
++ }
++}
++
+ static void __init l1tf_select_mitigation(void)
+ {
+ u64 half_pa;
+@@ -658,6 +697,8 @@ static void __init l1tf_select_mitigatio
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
++ override_cache_bits(&boot_cpu_data);
++
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+@@ -677,11 +718,6 @@ static void __init l1tf_select_mitigatio
+ return;
+ #endif
+
+- /*
+- * This is extremely unlikely to happen because almost all
+- * systems have far more MAX_PA/2 than RAM can be fit into
+- * DIMM slots.
+- */
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -882,6 +882,7 @@ static void identify_cpu_without_cpuid(s
+ }
+ }
+ #endif
++ c->x86_cache_bits = c->x86_phys_bits;
+ }
+
+ static const __initconst struct x86_cpu_id cpu_no_speculation[] = {