--- /dev/null
+From 4c4a39dd5fe2d13e2d2fa5fceb8ef95d19fc389a Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Wed, 4 Jul 2018 23:07:45 +0100
+Subject: arm64: Fix mismatched cache line size detection
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 4c4a39dd5fe2d13e2d2fa5fceb8ef95d19fc389a upstream.
+
+If there is a mismatch in the I/D min line size, we must
+always use the system wide safe value both in applications
+and in the kernel, while performing cache operations. However,
+we have been checking more bits than just the min line sizes,
+which triggers false negatives. We may need to trap the user
+accesses in such cases, but not necessarily patch the kernel.
+
+This patch fixes the check to do the right thing as advertised.
+A new capability will be added to check mismatches in other
+fields and ensure we trap the CTR accesses.
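+
+As an illustration only (not part of the upstream diff below), the intent
+is that only the DminLine and IminLine fields of CTR_EL0 are compared
+against the system wide safe value. A minimal stand-alone sketch of that
+masking, with a hypothetical helper name and parameters:
+
+	/* Sketch: mirrors the new CTR_CACHE_MINLINE_MASK comparison. */
+	#define CTR_DMINLINE_SHIFT	16
+	#define CTR_IMINLINE_SHIFT	0
+	#define CTR_CACHE_MINLINE_MASK	\
+		(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+
+	/* True only when the D/I min line fields themselves differ. */
+	static int minline_mismatch(unsigned int local_ctr,
+				    unsigned int safe_ctr)
+	{
+		return (local_ctr & CTR_CACHE_MINLINE_MASK) !=
+		       (safe_ctr & CTR_CACHE_MINLINE_MASK);
+	}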
+
+Fixes: be68a8aaf925 ("arm64: cpufeature: Fix CTR_EL0 field definitions")
+Cc: <stable@vger.kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/cache.h | 5 +++++
+ arch/arm64/kernel/cpu_errata.c | 6 ++++--
+ arch/arm64/kernel/cpufeature.c | 4 ++--
+ 3 files changed, 11 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -20,9 +20,14 @@
+
+ #define CTR_L1IP_SHIFT 14
+ #define CTR_L1IP_MASK 3
++#define CTR_DMINLINE_SHIFT 16
++#define CTR_IMINLINE_SHIFT 0
+ #define CTR_CWG_SHIFT 24
+ #define CTR_CWG_MASK 15
+
++#define CTR_CACHE_MINLINE_MASK \
++ (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
++
+ #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+ #define ICACHE_POLICY_VPIPT 0
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -50,9 +50,11 @@ static bool
+ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
+ int scope)
+ {
++ u64 mask = CTR_CACHE_MINLINE_MASK;
++
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+- return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
+- (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
++ return (read_cpuid_cachetype() & mask) !=
++ (arm64_ftr_reg_ctrel0.sys_val & mask);
+ }
+
+ static int cpu_enable_trap_ctr_access(void *__unused)
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -180,14 +180,14 @@ static const struct arm64_ftr_bits ftr_c
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
+ /*
+ * Linux can handle differing I-cache policies. Userspace JITs will
+ * make use of *minLine.
+ * If we have differing I-cache policies, report it as the weakest - VIPT.
+ */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
+ ARM64_FTR_END,
+ };
+
--- /dev/null
+From 314d53d297980676011e6fd83dac60db4a01dc70 Mon Sep 17 00:00:00 2001
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Wed, 4 Jul 2018 23:07:46 +0100
+Subject: arm64: Handle mismatched cache type
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 314d53d297980676011e6fd83dac60db4a01dc70 upstream.
+
+Track mismatches in the cache type register (CTR_EL0), other
+than the D/I min line sizes and trap user accesses if there are any.
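+
+As a rough sketch of the matcher logic (simplified names; it assumes the
+strict mask for CTR_EL0 covers the D/I min line fields, as it does in
+cpufeature.c), one helper can serve both capabilities by flipping the
+min line bits out of the comparison mask for the cache type check:
+
+	/* Sketch only: 'strict' stands in for arm64_ftr_reg_ctrel0.strict_mask. */
+	#define CTR_CACHE_MINLINE_MASK	(0xfUL << 16 | 0xfUL << 0)
+
+	static unsigned long ctr_check_mask(int type_check, unsigned long strict)
+	{
+		unsigned long mask = CTR_CACHE_MINLINE_MASK;
+
+		/*
+		 * For the cache type capability, XOR drops the min line
+		 * bits (already strict) and keeps every other strict field.
+		 */
+		if (type_check)
+			mask ^= strict;
+		return mask;
+	}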
+
+Fixes: be68a8aaf925 ("arm64: cpufeature: Fix CTR_EL0 field definitions")
+Cc: <stable@vger.kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/cpucaps.h | 3 ++-
+ arch/arm64/kernel/cpu_errata.c | 17 ++++++++++++++---
+ 2 files changed, 16 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -44,7 +44,8 @@
+ #define ARM64_HARDEN_BRANCH_PREDICTOR 24
+ #define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
+ #define ARM64_SSBD 26
++#define ARM64_MISMATCHED_CACHE_TYPE 27
+
+-#define ARM64_NCAPS 27
++#define ARM64_NCAPS 28
+
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -47,11 +47,15 @@ is_kryo_midr(const struct arm64_cpu_capa
+ }
+
+ static bool
+-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
+- int scope)
++has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
++ int scope)
+ {
+ u64 mask = CTR_CACHE_MINLINE_MASK;
+
++ /* Skip matching the min line sizes for cache type check */
++ if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
++ mask ^= arm64_ftr_reg_ctrel0.strict_mask;
++
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return (read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask);
+@@ -515,7 +519,14 @@ const struct arm64_cpu_capabilities arm6
+ {
+ .desc = "Mismatched cache line size",
+ .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
+- .matches = has_mismatched_cache_line_size,
++ .matches = has_mismatched_cache_type,
++ .def_scope = SCOPE_LOCAL_CPU,
++ .enable = cpu_enable_trap_ctr_access,
++ },
++ {
++ .desc = "Mismatched cache type",
++ .capability = ARM64_MISMATCHED_CACHE_TYPE,
++ .matches = has_mismatched_cache_type,
+ .def_scope = SCOPE_LOCAL_CPU,
+ .enable = cpu_enable_trap_ctr_access,
+ },
--- /dev/null
+From ad0eaee6195db1db1749dd46b9e6f4466793d178 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Mon, 6 Aug 2018 07:14:51 -0500
+Subject: ASoC: wm8994: Fix missing break in switch
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit ad0eaee6195db1db1749dd46b9e6f4466793d178 upstream.
+
+Add missing break statement in order to prevent the code from falling
+through to the default case.
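+
+For context only (a generic sketch, not the wm8994 driver code): without
+the break, control falls out of the preceding case into "default" and the
+function returns -EINVAL even though the clock was configured. A minimal
+illustration of the bug class:
+
+	/* Hypothetical example of an unintended switch fall-through. */
+	int set_source(int src)
+	{
+		switch (src) {
+		case 1:
+			/* ... configure the clock for source 1 ... */
+			break;	/* without this, we fall into default */
+		default:
+			return -22;	/* -EINVAL */
+		}
+		return 0;
+	}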
+
+Addresses-Coverity-ID: 115050 ("Missing break in switch")
+Reported-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Acked-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+[Gustavo: Backported to 3.16..4.18 - Remove code comment removal]
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/wm8994.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2431,6 +2431,7 @@ static int wm8994_set_dai_sysclk(struct
+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_2,
+ WM8994_OPCLK_ENA, 0);
+ }
++ break;
+
+ default:
+ return -EINVAL;