git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 9 Oct 2019 08:04:01 +0000 (10:04 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 9 Oct 2019 08:04:01 +0000 (10:04 +0200)
added patches:
arm64-add-sysfs-vulnerability-show-for-meltdown.patch
arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch
arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch
arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch
arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch
arm64-always-enable-spectre-v2-vulnerability-detection.patch
arm64-always-enable-ssb-vulnerability-detection.patch
arm64-docs-document-ssbs-hwcap.patch
arm64-enable-generic-cpu-vulnerabilites-support.patch
arm64-fix-ssbs-sanitization.patch
arm64-force-ssbs-on-context-switch.patch
arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch
arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch
arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch
kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch

16 files changed:
queue-4.19/arm64-add-sysfs-vulnerability-show-for-meltdown.patch [new file with mode: 0644]
queue-4.19/arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch [new file with mode: 0644]
queue-4.19/arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch [new file with mode: 0644]
queue-4.19/arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch [new file with mode: 0644]
queue-4.19/arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch [new file with mode: 0644]
queue-4.19/arm64-always-enable-spectre-v2-vulnerability-detection.patch [new file with mode: 0644]
queue-4.19/arm64-always-enable-ssb-vulnerability-detection.patch [new file with mode: 0644]
queue-4.19/arm64-docs-document-ssbs-hwcap.patch [new file with mode: 0644]
queue-4.19/arm64-enable-generic-cpu-vulnerabilites-support.patch [new file with mode: 0644]
queue-4.19/arm64-fix-ssbs-sanitization.patch [new file with mode: 0644]
queue-4.19/arm64-force-ssbs-on-context-switch.patch [new file with mode: 0644]
queue-4.19/arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch [new file with mode: 0644]
queue-4.19/arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch [new file with mode: 0644]
queue-4.19/arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch [new file with mode: 0644]
queue-4.19/kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/arm64-add-sysfs-vulnerability-show-for-meltdown.patch b/queue-4.19/arm64-add-sysfs-vulnerability-show-for-meltdown.patch
new file mode 100644 (file)
index 0000000..9c9d24e
--- /dev/null
@@ -0,0 +1,149 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:21 +0200
+Subject: arm64: add sysfs vulnerability show for meltdown
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Jeremy Linton <jeremy.linton@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Andre Przywara <andre.przywara@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-8-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit 1b3ccf4be0e7be8c4bd8522066b6cbc92591e912 ]
+
+We implement page table isolation as a mitigation for meltdown.
+Report this to userspace via sysfs.
+
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   58 +++++++++++++++++++++++++++++++----------
+ 1 file changed, 44 insertions(+), 14 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -889,7 +889,7 @@ static bool has_cache_dic(const struct a
+       return ctr & BIT(CTR_DIC_SHIFT);
+ }
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++static bool __meltdown_safe = true;
+ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+@@ -908,6 +908,16 @@ static bool unmap_kernel_at_el0(const st
+               { /* sentinel */ }
+       };
+       char const *str = "command line option";
++      bool meltdown_safe;
++
++      meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
++
++      /* Defer to CPU feature registers */
++      if (has_cpuid_feature(entry, scope))
++              meltdown_safe = true;
++
++      if (!meltdown_safe)
++              __meltdown_safe = false;
+       /*
+        * For reasons that aren't entirely clear, enabling KPTI on Cavium
+@@ -919,6 +929,19 @@ static bool unmap_kernel_at_el0(const st
+               __kpti_forced = -1;
+       }
++      /* Useful for KASLR robustness */
++      if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
++              if (!__kpti_forced) {
++                      str = "KASLR";
++                      __kpti_forced = 1;
++              }
++      }
++
++      if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
++              pr_info_once("kernel page table isolation disabled by kernel configuration\n");
++              return false;
++      }
++
+       /* Forced? */
+       if (__kpti_forced) {
+               pr_info_once("kernel page table isolation forced %s by %s\n",
+@@ -926,18 +949,10 @@ static bool unmap_kernel_at_el0(const st
+               return __kpti_forced > 0;
+       }
+-      /* Useful for KASLR robustness */
+-      if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+-              return true;
+-
+-      /* Don't force KPTI for CPUs that are not vulnerable */
+-      if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+-              return false;
+-
+-      /* Defer to CPU feature registers */
+-      return !has_cpuid_feature(entry, scope);
++      return !meltdown_safe;
+ }
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ static void
+ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+ {
+@@ -962,6 +977,12 @@ kpti_install_ng_mappings(const struct ar
+       return;
+ }
++#else
++static void
++kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
++{
++}
++#endif        /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ static int __init parse_kpti(char *str)
+ {
+@@ -975,7 +996,6 @@ static int __init parse_kpti(char *str)
+       return 0;
+ }
+ early_param("kpti", parse_kpti);
+-#endif        /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ #ifdef CONFIG_ARM64_HW_AFDBM
+ static inline void __cpu_enable_hw_dbm(void)
+@@ -1196,7 +1216,6 @@ static const struct arm64_cpu_capabiliti
+               .field_pos = ID_AA64PFR0_EL0_SHIFT,
+               .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+       },
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       {
+               .desc = "Kernel page table isolation (KPTI)",
+               .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+@@ -1212,7 +1231,6 @@ static const struct arm64_cpu_capabiliti
+               .matches = unmap_kernel_at_el0,
+               .cpu_enable = kpti_install_ng_mappings,
+       },
+-#endif
+       {
+               /* FP/SIMD is not implemented */
+               .capability = ARM64_HAS_NO_FPSIMD,
+@@ -1853,3 +1871,15 @@ void cpu_clear_disr(const struct arm64_c
+       /* Firmware may have left a deferred SError in this register. */
+       write_sysreg_s(0, SYS_DISR_EL1);
+ }
++
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
++                        char *buf)
++{
++      if (__meltdown_safe)
++              return sprintf(buf, "Not affected\n");
++
++      if (arm64_kernel_unmapped_at_el0())
++              return sprintf(buf, "Mitigation: PTI\n");
++
++      return sprintf(buf, "Vulnerable\n");
++}
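
[Editor's note, not part of the queued patch: the cpu_show_meltdown() hook added above is only exposed once the generic CPU vulnerabilities interface is selected by the arm64-enable-generic-cpu-vulnerabilites-support.patch later in this queue. As a rough illustrative sketch, a userspace consumer would read the resulting standard sysfs file, /sys/devices/system/cpu/vulnerabilities/meltdown, roughly like this; the same pattern applies to the spectre_v1, spectre_v2 and spec_store_bypass entries added by the other patches in this series.]

	/* Illustrative userspace sketch only; not part of any queued patch. */
	#include <stdio.h>

	int main(void)
	{
		char buf[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/meltdown", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* Prints e.g. "Mitigation: PTI" or "Not affected". */
		if (fgets(buf, sizeof(buf), f))
			printf("meltdown: %s", buf);
		fclose(f);
		return 0;
	}
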
diff --git a/queue-4.19/arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch b/queue-4.19/arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch
new file mode 100644 (file)
index 0000000..1f44625
--- /dev/null
@@ -0,0 +1,40 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:20 +0200
+Subject: arm64: Add sysfs vulnerability show for spectre-v1
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Mian Yousaf Kaukab <ykaukab@suse.de>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-7-ard.biesheuvel@linaro.org>
+
+From: Mian Yousaf Kaukab <ykaukab@suse.de>
+
+[ Upstream commit 3891ebccace188af075ce143d8b072b65e90f695 ]
+
+spectre-v1 has been mitigated and the mitigation is always active.
+Report this to userspace via sysfs
+
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Acked-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -729,3 +729,9 @@ const struct arm64_cpu_capabilities arm6
+       {
+       }
+ };
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
diff --git a/queue-4.19/arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch b/queue-4.19/arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch
new file mode 100644 (file)
index 0000000..9ac7ca2
--- /dev/null
@@ -0,0 +1,92 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:27 +0200
+Subject: arm64: add sysfs vulnerability show for spectre-v2
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-14-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit d2532e27b5638bb2e2dd52b80b7ea2ec65135377 ]
+
+Track whether all the cores in the machine are vulnerable to Spectre-v2,
+and whether all the vulnerable cores have been mitigated. We then expose
+this information to userspace via sysfs.
+
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   27 ++++++++++++++++++++++++++-
+ 1 file changed, 26 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -480,6 +480,10 @@ has_cortex_a76_erratum_1463225(const str
+       .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
+       CAP_MIDR_RANGE_LIST(midr_list)
++/* Track overall mitigation state. We are only mitigated if all cores are ok */
++static bool __hardenbp_enab = true;
++static bool __spectrev2_safe = true;
++
+ /*
+  * Generic helper for handling capabilties with multiple (match,enable) pairs
+  * of call backs, sharing the same capability bit.
+@@ -522,6 +526,10 @@ static const struct midr_range spectre_v
+       { /* sentinel */ }
+ };
++/*
++ * Track overall bp hardening for all heterogeneous cores in the machine.
++ * We are only considered "safe" if all booted cores are known safe.
++ */
+ static bool __maybe_unused
+ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+ {
+@@ -543,6 +551,8 @@ check_branch_predictor(const struct arm6
+       if (!need_wa)
+               return false;
++      __spectrev2_safe = false;
++
+       if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+               pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+               __hardenbp_enab = false;
+@@ -552,11 +562,14 @@ check_branch_predictor(const struct arm6
+       /* forced off */
+       if (__nospectre_v2) {
+               pr_info_once("spectrev2 mitigation disabled by command line option\n");
++              __hardenbp_enab = false;
+               return false;
+       }
+-      if (need_wa < 0)
++      if (need_wa < 0) {
+               pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
++              __hardenbp_enab = false;
++      }
+       return (need_wa > 0);
+ }
+@@ -753,3 +766,15 @@ ssize_t cpu_show_spectre_v1(struct devic
+ {
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ }
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
++              char *buf)
++{
++      if (__spectrev2_safe)
++              return sprintf(buf, "Not affected\n");
++
++      if (__hardenbp_enab)
++              return sprintf(buf, "Mitigation: Branch predictor hardening\n");
++
++      return sprintf(buf, "Vulnerable\n");
++}
diff --git a/queue-4.19/arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch b/queue-4.19/arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch
new file mode 100644 (file)
index 0000000..66dc89d
--- /dev/null
@@ -0,0 +1,148 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:28 +0200
+Subject: arm64: add sysfs vulnerability show for speculative store bypass
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Jeremy Linton <jeremy.linton@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191008153930.15386-15-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit 526e065dbca6df0b5a130b84b836b8b3c9f54e21 ]
+
+Return status based on ssbd_state and __ssb_safe. If the
+mitigation is disabled, or the firmware isn't responding then
+return the expected machine state based on a whitelist of known
+good cores.
+
+Given a heterogeneous machine, the overall machine vulnerability
+defaults to safe but is reset to unsafe when we miss the whitelist
+and the firmware doesn't explicitly tell us the core is safe.
+In order to make that work we delay transitioning to vulnerable
+until we know the firmware isn't responding to avoid a case
+where we miss the whitelist, but the firmware goes ahead and
+reports the core is not vulnerable. If all the cores in the
+machine have SSBS, then __ssb_safe will remain true.
+
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   42 +++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -233,6 +233,7 @@ static int detect_harden_bp_fw(void)
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+ int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
++static bool __ssb_safe = true;
+ static const struct ssbd_options {
+       const char      *str;
+@@ -336,6 +337,7 @@ static bool has_ssbd_mitigation(const st
+       struct arm_smccc_res res;
+       bool required = true;
+       s32 val;
++      bool this_cpu_safe = false;
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+@@ -344,8 +346,14 @@ static bool has_ssbd_mitigation(const st
+               goto out_printmsg;
+       }
++      /* delay setting __ssb_safe until we get a firmware response */
++      if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
++              this_cpu_safe = true;
++
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+               ssbd_state = ARM64_SSBD_UNKNOWN;
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               return false;
+       }
+@@ -362,6 +370,8 @@ static bool has_ssbd_mitigation(const st
+       default:
+               ssbd_state = ARM64_SSBD_UNKNOWN;
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               return false;
+       }
+@@ -370,14 +380,18 @@ static bool has_ssbd_mitigation(const st
+       switch (val) {
+       case SMCCC_RET_NOT_SUPPORTED:
+               ssbd_state = ARM64_SSBD_UNKNOWN;
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               return false;
++      /* machines with mixed mitigation requirements must not return this */
+       case SMCCC_RET_NOT_REQUIRED:
+               pr_info_once("%s mitigation not required\n", entry->desc);
+               ssbd_state = ARM64_SSBD_MITIGATED;
+               return false;
+       case SMCCC_RET_SUCCESS:
++              __ssb_safe = false;
+               required = true;
+               break;
+@@ -387,6 +401,8 @@ static bool has_ssbd_mitigation(const st
+       default:
+               WARN_ON(1);
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               return false;
+       }
+@@ -427,6 +443,14 @@ out_printmsg:
+       return required;
+ }
++/* known invulnerable cores */
++static const struct midr_range arm64_ssb_cpus[] = {
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
++      {},
++};
++
+ #ifdef CONFIG_ARM64_ERRATUM_1463225
+ DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+@@ -748,6 +772,7 @@ const struct arm64_cpu_capabilities arm6
+               .capability = ARM64_SSBD,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = has_ssbd_mitigation,
++              .midr_range_list = arm64_ssb_cpus,
+       },
+ #ifdef CONFIG_ARM64_ERRATUM_1463225
+       {
+@@ -778,3 +803,20 @@ ssize_t cpu_show_spectre_v2(struct devic
+       return sprintf(buf, "Vulnerable\n");
+ }
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev,
++              struct device_attribute *attr, char *buf)
++{
++      if (__ssb_safe)
++              return sprintf(buf, "Not affected\n");
++
++      switch (ssbd_state) {
++      case ARM64_SSBD_KERNEL:
++      case ARM64_SSBD_FORCE_ENABLE:
++              if (IS_ENABLED(CONFIG_ARM64_SSBD))
++                      return sprintf(buf,
++                          "Mitigation: Speculative Store Bypass disabled via prctl\n");
++      }
++
++      return sprintf(buf, "Vulnerable\n");
++}
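
[Editor's note, not part of the queued patch: the "Mitigation: Speculative Store Bypass disabled via prctl" string reported above refers to the existing per-task PR_SET_SPECULATION_CTRL speculation-control interface, not to anything introduced by this patch. A minimal sketch of how a task opts into the mitigation, assuming a kernel built with CONFIG_ARM64_SSBD and prctl speculation control support:]

	/* Illustrative userspace sketch only; not part of any queued patch. */
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		/* Query the current per-task SSB state (returns PR_SPEC_* flags). */
		int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

		printf("current SSB state: %d\n", state);

		/* Ask the kernel to disable speculative store bypass for this task. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");
		return 0;
	}
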
diff --git a/queue-4.19/arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch b/queue-4.19/arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch
new file mode 100644 (file)
index 0000000..cb36058
--- /dev/null
@@ -0,0 +1,224 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:25 +0200
+Subject: arm64: Advertise mitigation of Spectre-v2, or lack thereof
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Marc Zyngier <marc.zyngier@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-12-ard.biesheuvel@linaro.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit 73f38166095947f3b86b02fbed6bd592223a7ac8 ]
+
+We currently have a list of CPUs affected by Spectre-v2, for which
+we check that the firmware implements ARCH_WORKAROUND_1. It turns
+out that not all firmwares do implement the required mitigation,
+and that we fail to let the user know about it.
+
+Instead, let's slightly revamp our checks, and rely on a whitelist
+of cores that are known to be non-vulnerable, and let the user know
+the status of the mitigation in the kernel log.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |  109 +++++++++++++++++++++--------------------
+ 1 file changed, 56 insertions(+), 53 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -109,9 +109,9 @@ static void __copy_hyp_vect_bpi(int slot
+       __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+ }
+-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+-                                    const char *hyp_vecs_start,
+-                                    const char *hyp_vecs_end)
++static void install_bp_hardening_cb(bp_hardening_cb_t fn,
++                                  const char *hyp_vecs_start,
++                                  const char *hyp_vecs_end)
+ {
+       static DEFINE_SPINLOCK(bp_lock);
+       int cpu, slot = -1;
+@@ -138,7 +138,7 @@ static void __install_bp_hardening_cb(bp
+ #define __smccc_workaround_1_smc_start                NULL
+ #define __smccc_workaround_1_smc_end          NULL
+-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
++static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                     const char *hyp_vecs_start,
+                                     const char *hyp_vecs_end)
+ {
+@@ -146,23 +146,6 @@ static void __install_bp_hardening_cb(bp
+ }
+ #endif        /* CONFIG_KVM_INDIRECT_VECTORS */
+-static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+-                                   bp_hardening_cb_t fn,
+-                                   const char *hyp_vecs_start,
+-                                   const char *hyp_vecs_end)
+-{
+-      u64 pfr0;
+-
+-      if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+-              return;
+-
+-      pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+-      if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+-              return;
+-
+-      __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+-}
+-
+ #include <uapi/linux/psci.h>
+ #include <linux/arm-smccc.h>
+ #include <linux/psci.h>
+@@ -197,31 +180,27 @@ static int __init parse_nospectre_v2(cha
+ }
+ early_param("nospectre_v2", parse_nospectre_v2);
+-static void
+-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
++/*
++ * -1: No workaround
++ *  0: No workaround required
++ *  1: Workaround installed
++ */
++static int detect_harden_bp_fw(void)
+ {
+       bp_hardening_cb_t cb;
+       void *smccc_start, *smccc_end;
+       struct arm_smccc_res res;
+       u32 midr = read_cpuid_id();
+-      if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+-              return;
+-
+-      if (__nospectre_v2) {
+-              pr_info_once("spectrev2 mitigation disabled by command line option\n");
+-              return;
+-      }
+-
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+-              return;
++              return -1;
+       switch (psci_ops.conduit) {
+       case PSCI_CONDUIT_HVC:
+               arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+               if ((int)res.a0 < 0)
+-                      return;
++                      return -1;
+               cb = call_hvc_arch_workaround_1;
+               /* This is a guest, no need to patch KVM vectors */
+               smccc_start = NULL;
+@@ -232,23 +211,23 @@ enable_smccc_arch_workaround_1(const str
+               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+               if ((int)res.a0 < 0)
+-                      return;
++                      return -1;
+               cb = call_smc_arch_workaround_1;
+               smccc_start = __smccc_workaround_1_smc_start;
+               smccc_end = __smccc_workaround_1_smc_end;
+               break;
+       default:
+-              return;
++              return -1;
+       }
+       if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+           ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
+               cb = qcom_link_stack_sanitization;
+-      install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
++      install_bp_hardening_cb(cb, smccc_start, smccc_end);
+-      return;
++      return 1;
+ }
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+@@ -535,24 +514,48 @@ multi_entry_cap_cpu_enable(const struct
+ }
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+-
+ /*
+- * List of CPUs where we need to issue a psci call to
+- * harden the branch predictor.
++ * List of CPUs that do not need any Spectre-v2 mitigation at all.
+  */
+-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
+-      MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+-      MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+-      MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+-      MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+-      MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+-      MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+-      MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+-      MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+-      MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
+-      {},
++static const struct midr_range spectre_v2_safe_list[] = {
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
++      { /* sentinel */ }
+ };
++static bool __maybe_unused
++check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
++{
++      int need_wa;
++
++      WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++      /* If the CPU has CSV2 set, we're safe */
++      if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
++                                               ID_AA64PFR0_CSV2_SHIFT))
++              return false;
++
++      /* Alternatively, we have a list of unaffected CPUs */
++      if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
++              return false;
++
++      /* Fallback to firmware detection */
++      need_wa = detect_harden_bp_fw();
++      if (!need_wa)
++              return false;
++
++      /* forced off */
++      if (__nospectre_v2) {
++              pr_info_once("spectrev2 mitigation disabled by command line option\n");
++              return false;
++      }
++
++      if (need_wa < 0)
++              pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
++
++      return (need_wa > 0);
++}
+ #endif
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+@@ -715,8 +718,8 @@ const struct arm64_cpu_capabilities arm6
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              .cpu_enable = enable_smccc_arch_workaround_1,
+-              ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
++              .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++              .matches = check_branch_predictor,
+       },
+ #endif
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
diff --git a/queue-4.19/arm64-always-enable-spectre-v2-vulnerability-detection.patch b/queue-4.19/arm64-always-enable-spectre-v2-vulnerability-detection.patch
new file mode 100644 (file)
index 0000000..49d515d
--- /dev/null
@@ -0,0 +1,93 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:26 +0200
+Subject: arm64: Always enable spectre-v2 vulnerability detection
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-13-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit 8c1e3d2bb44cbb998cb28ff9a18f105fee7f1eb3 ]
+
+Ensure we are always able to detect whether or not the CPU is affected
+by Spectre-v2, so that we can later advertise this to userspace.
+
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -87,7 +87,6 @@ cpu_enable_trap_ctr_access(const struct
+ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
+-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ #include <asm/mmu_context.h>
+ #include <asm/cacheflush.h>
+@@ -225,11 +224,11 @@ static int detect_harden_bp_fw(void)
+           ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
+               cb = qcom_link_stack_sanitization;
+-      install_bp_hardening_cb(cb, smccc_start, smccc_end);
++      if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
++              install_bp_hardening_cb(cb, smccc_start, smccc_end);
+       return 1;
+ }
+-#endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+@@ -513,7 +512,6 @@ multi_entry_cap_cpu_enable(const struct
+                       caps->cpu_enable(caps);
+ }
+-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ /*
+  * List of CPUs that do not need any Spectre-v2 mitigation at all.
+  */
+@@ -545,6 +543,12 @@ check_branch_predictor(const struct arm6
+       if (!need_wa)
+               return false;
++      if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
++              pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
++              __hardenbp_enab = false;
++              return false;
++      }
++
+       /* forced off */
+       if (__nospectre_v2) {
+               pr_info_once("spectrev2 mitigation disabled by command line option\n");
+@@ -556,7 +560,6 @@ check_branch_predictor(const struct arm6
+       return (need_wa > 0);
+ }
+-#endif
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+@@ -715,13 +718,11 @@ const struct arm64_cpu_capabilities arm6
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+       },
+ #endif
+-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = check_branch_predictor,
+       },
+-#endif
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+       {
+               .desc = "EL2 vector hardening",
diff --git a/queue-4.19/arm64-always-enable-ssb-vulnerability-detection.patch b/queue-4.19/arm64-always-enable-ssb-vulnerability-detection.patch
new file mode 100644 (file)
index 0000000..df4105d
--- /dev/null
@@ -0,0 +1,87 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:23 +0200
+Subject: arm64: Always enable ssb vulnerability detection
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-10-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit d42281b6e49510f078ace15a8ea10f71e6262581 ]
+
+Ensure we are always able to detect whether or not the CPU is affected
+by SSB, so that we can later advertise this to userspace.
+
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+[will: Use IS_ENABLED instead of #ifdef]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    4 ----
+ arch/arm64/kernel/cpu_errata.c      |    9 +++++----
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -525,11 +525,7 @@ static inline int arm64_get_ssbd_state(v
+ #endif
+ }
+-#ifdef CONFIG_ARM64_SSBD
+ void arm64_set_ssbd_mitigation(bool state);
+-#else
+-static inline void arm64_set_ssbd_mitigation(bool state) {}
+-#endif
+ #endif /* __ASSEMBLY__ */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -239,7 +239,6 @@ enable_smccc_arch_workaround_1(const str
+ }
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+-#ifdef CONFIG_ARM64_SSBD
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+ int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+@@ -312,6 +311,11 @@ void __init arm64_enable_wa2_handling(st
+ void arm64_set_ssbd_mitigation(bool state)
+ {
++      if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
++              pr_info_once("SSBD disabled by kernel configuration\n");
++              return;
++      }
++
+       if (this_cpu_has_cap(ARM64_SSBS)) {
+               if (state)
+                       asm volatile(SET_PSTATE_SSBS(0));
+@@ -431,7 +435,6 @@ out_printmsg:
+       return required;
+ }
+-#endif        /* CONFIG_ARM64_SSBD */
+ #ifdef CONFIG_ARM64_ERRATUM_1463225
+ DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+@@ -710,14 +713,12 @@ const struct arm64_cpu_capabilities arm6
+               ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
+       },
+ #endif
+-#ifdef CONFIG_ARM64_SSBD
+       {
+               .desc = "Speculative Store Bypass Disable",
+               .capability = ARM64_SSBD,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = has_ssbd_mitigation,
+       },
+-#endif
+ #ifdef CONFIG_ARM64_ERRATUM_1463225
+       {
+               .desc = "ARM erratum 1463225",
diff --git a/queue-4.19/arm64-docs-document-ssbs-hwcap.patch b/queue-4.19/arm64-docs-document-ssbs-hwcap.patch
new file mode 100644 (file)
index 0000000..4b86dbf
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:18 +0200
+Subject: arm64: docs: Document SSBS HWCAP
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-5-ard.biesheuvel@linaro.org>
+
+From: Will Deacon <will.deacon@arm.com>
+
+[ Upstream commit ee91176120bd584aa10c564e7e9fdcaf397190a1 ]
+
+We advertise the MRS/MSR instructions for toggling SSBS at EL0 using an
+HWCAP, so document it along with the others.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/elf_hwcaps.txt |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/Documentation/arm64/elf_hwcaps.txt
++++ b/Documentation/arm64/elf_hwcaps.txt
+@@ -178,3 +178,7 @@ HWCAP_ILRCPC
+ HWCAP_FLAGM
+     Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
++
++HWCAP_SSBS
++
++    Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
diff --git a/queue-4.19/arm64-enable-generic-cpu-vulnerabilites-support.patch b/queue-4.19/arm64-enable-generic-cpu-vulnerabilites-support.patch
new file mode 100644 (file)
index 0000000..654e6d8
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:22 +0200
+Subject: arm64: enable generic CPU vulnerabilites support
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Mian Yousaf Kaukab <ykaukab@suse.de>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-9-ard.biesheuvel@linaro.org>
+
+From: Mian Yousaf Kaukab <ykaukab@suse.de>
+
+[ Upstream commit 61ae1321f06c4489c724c803e9b8363dea576da3 ]
+
+Enable CPU vulnerabilty show functions for spectre_v1, spectre_v2,
+meltdown and store-bypass.
+
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -84,6 +84,7 @@ config ARM64
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CLOCKEVENTS_BROADCAST
+       select GENERIC_CPU_AUTOPROBE
++      select GENERIC_CPU_VULNERABILITIES
+       select GENERIC_EARLY_IOREMAP
+       select GENERIC_IDLE_POLL_SETUP
+       select GENERIC_IRQ_MULTI_HANDLER
diff --git a/queue-4.19/arm64-fix-ssbs-sanitization.patch b/queue-4.19/arm64-fix-ssbs-sanitization.patch
new file mode 100644 (file)
index 0000000..1627151
--- /dev/null
@@ -0,0 +1,71 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:19 +0200
+Subject: arm64: fix SSBS sanitization
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Mark Rutland <mark.rutland@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-6-ard.biesheuvel@linaro.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit f54dada8274643e3ff4436df0ea124aeedc43cae ]
+
+In valid_user_regs() we treat SSBS as a RES0 bit, and consequently it is
+unexpectedly cleared when we restore a sigframe or fiddle with GPRs via
+ptrace.
+
+This patch fixes valid_user_regs() to account for this, updating the
+function to refer to the latest ARM ARM (ARM DDI 0487D.a). For AArch32
+tasks, SSBS appears in bit 23 of SPSR_EL1, matching its position in the
+AArch32-native PSR format, and we don't need to translate it as we have
+to for DIT.
+
+There are no other bit assignments that we need to account for today.
+As the recent documentation describes the DIT bit, we can drop our
+comment regarding DIT.
+
+While removing SSBS from the RES0 masks, existing inconsistent
+whitespace is corrected.
+
+Fixes: d71be2b6c0e19180 ("arm64: cpufeature: Detect SSBS and advertise to userspace")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/ptrace.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -1666,19 +1666,20 @@ void syscall_trace_exit(struct pt_regs *
+ }
+ /*
+- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
+- * We also take into account DIT (bit 24), which is not yet documented, and
+- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
+- * allocated an EL0 meaning in future.
++ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
++ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
++ * not described in ARM DDI 0487D.a.
++ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
++ * be allocated an EL0 meaning in future.
+  * Userspace cannot use these until they have an architectural meaning.
+  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
+  * We also reserve IL for the kernel; SS is handled dynamically.
+  */
+ #define SPSR_EL1_AARCH64_RES0_BITS \
+-      (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+-       GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
++      (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
++       GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
+ #define SPSR_EL1_AARCH32_RES0_BITS \
+-      (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
++      (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
+ static int valid_compat_regs(struct user_pt_regs *regs)
+ {
diff --git a/queue-4.19/arm64-force-ssbs-on-context-switch.patch b/queue-4.19/arm64-force-ssbs-on-context-switch.patch
new file mode 100644 (file)
index 0000000..aff1fdd
--- /dev/null
@@ -0,0 +1,118 @@
+From foo@baz Wed 09 Oct 2019 10:02:12 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:30 +0200
+Subject: arm64: Force SSBS on context switch
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Marc Zyngier <marc.zyngier@arm.com>, Will Deacon <will@kernel.org>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-17-ard.biesheuvel@linaro.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit cbdf8a189a66001c36007bf0f5c975d0376c5c3a ]
+
+On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
+where only some of the CPUs implement SSBS, we end-up losing track of
+the SSBS bit across task migration.
+
+To address this issue, let's force the SSBS bit on context switch.
+
+Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+[will: inverted logic and added comments]
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/processor.h |   14 ++++++++++++--
+ arch/arm64/kernel/process.c        |   29 ++++++++++++++++++++++++++++-
+ 2 files changed, 40 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -177,6 +177,16 @@ static inline void start_thread_common(s
+       regs->pc = pc;
+ }
++static inline void set_ssbs_bit(struct pt_regs *regs)
++{
++      regs->pstate |= PSR_SSBS_BIT;
++}
++
++static inline void set_compat_ssbs_bit(struct pt_regs *regs)
++{
++      regs->pstate |= PSR_AA32_SSBS_BIT;
++}
++
+ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
+                               unsigned long sp)
+ {
+@@ -184,7 +194,7 @@ static inline void start_thread(struct p
+       regs->pstate = PSR_MODE_EL0t;
+       if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+-              regs->pstate |= PSR_SSBS_BIT;
++              set_ssbs_bit(regs);
+       regs->sp = sp;
+ }
+@@ -203,7 +213,7 @@ static inline void compat_start_thread(s
+ #endif
+       if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+-              regs->pstate |= PSR_AA32_SSBS_BIT;
++              set_compat_ssbs_bit(regs);
+       regs->compat_sp = sp;
+ }
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -360,7 +360,7 @@ int copy_thread(unsigned long clone_flag
+                       childregs->pstate |= PSR_UAO_BIT;
+               if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+-                      childregs->pstate |= PSR_SSBS_BIT;
++                      set_ssbs_bit(childregs);
+               p->thread.cpu_context.x19 = stack_start;
+               p->thread.cpu_context.x20 = stk_sz;
+@@ -402,6 +402,32 @@ void uao_thread_switch(struct task_struc
+ }
+ /*
++ * Force SSBS state on context-switch, since it may be lost after migrating
++ * from a CPU which treats the bit as RES0 in a heterogeneous system.
++ */
++static void ssbs_thread_switch(struct task_struct *next)
++{
++      struct pt_regs *regs = task_pt_regs(next);
++
++      /*
++       * Nothing to do for kernel threads, but 'regs' may be junk
++       * (e.g. idle task) so check the flags and bail early.
++       */
++      if (unlikely(next->flags & PF_KTHREAD))
++              return;
++
++      /* If the mitigation is enabled, then we leave SSBS clear. */
++      if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
++          test_tsk_thread_flag(next, TIF_SSBD))
++              return;
++
++      if (compat_user_mode(regs))
++              set_compat_ssbs_bit(regs);
++      else if (user_mode(regs))
++              set_ssbs_bit(regs);
++}
++
++/*
+  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+  * shadow copy so that we can restore this upon entry from userspace.
+  *
+@@ -429,6 +455,7 @@ __notrace_funcgraph struct task_struct *
+       contextidr_thread_switch(next);
+       entry_task_switch(next);
+       uao_thread_switch(next);
++      ssbs_thread_switch(next);
+       /*
+        * Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/queue-4.19/arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch b/queue-4.19/arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch
new file mode 100644 (file)
index 0000000..7cbde55
--- /dev/null
@@ -0,0 +1,76 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:24 +0200
+Subject: arm64: Provide a command line to disable spectre_v2 mitigation
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Jeremy Linton <jeremy.linton@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Andre Przywara <andre.przywara@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Jonathan Corbet <corbet@lwn.net>, linux-doc@vger.kernel.org, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-11-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit e5ce5e7267ddcbe13ab9ead2542524e1b7993e5a ]
+
+There are various reasons, such as benchmarking, to disable spectrev2
+mitigation on a machine. Provide a command-line option to do so.
+
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: linux-doc@vger.kernel.org
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt |    8 ++++----
+ arch/arm64/kernel/cpu_errata.c                  |   13 +++++++++++++
+ 2 files changed, 17 insertions(+), 4 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2866,10 +2866,10 @@
+                       (bounds check bypass). With this option data leaks
+                       are possible in the system.
+-      nospectre_v2    [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
+-                      (indirect branch prediction) vulnerability. System may
+-                      allow data leaks with this option, which is equivalent
+-                      to spectre_v2=off.
++      nospectre_v2    [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
++                      the Spectre variant 2 (indirect branch prediction)
++                      vulnerability. System may allow data leaks with this
++                      option.
+       nospec_store_bypass_disable
+                       [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -189,6 +189,14 @@ static void qcom_link_stack_sanitization
+                    : "=&r" (tmp));
+ }
++static bool __nospectre_v2;
++static int __init parse_nospectre_v2(char *str)
++{
++      __nospectre_v2 = true;
++      return 0;
++}
++early_param("nospectre_v2", parse_nospectre_v2);
++
+ static void
+ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+ {
+@@ -200,6 +208,11 @@ enable_smccc_arch_workaround_1(const str
+       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+               return;
++      if (__nospectre_v2) {
++              pr_info_once("spectrev2 mitigation disabled by command line option\n");
++              return;
++      }
++
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+               return;
diff --git a/queue-4.19/arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch b/queue-4.19/arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch
new file mode 100644 (file)
index 0000000..3823228
--- /dev/null
@@ -0,0 +1,297 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:16 +0200
+Subject: arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Will Deacon <will.deacon@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-3-ard.biesheuvel@linaro.org>
+
+From: Will Deacon <will.deacon@arm.com>
+
+[ Upstream commit 8f04e8e6e29c93421a95b61cad62e3918425eac7 ]
+
+On CPUs with support for PSTATE.SSBS, the kernel can toggle the SSBD
+state without needing to call into firmware.
+
+This patch hooks into the existing SSBD infrastructure so that SSBS is
+used on CPUs that support it, but it's all made horribly complicated by
+the very real possibility of big/little systems that don't uniformly
+provide the new capability.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/processor.h   |    7 +++++
+ arch/arm64/include/asm/ptrace.h      |    1 
+ arch/arm64/include/asm/sysreg.h      |    3 ++
+ arch/arm64/include/uapi/asm/ptrace.h |    1 
+ arch/arm64/kernel/cpu_errata.c       |   26 ++++++++++++++++++--
+ arch/arm64/kernel/cpufeature.c       |   45 +++++++++++++++++++++++++++++++++++
+ arch/arm64/kernel/process.c          |    4 +++
+ arch/arm64/kernel/ssbd.c             |   21 ++++++++++++++++
+ 8 files changed, 106 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -182,6 +182,10 @@ static inline void start_thread(struct p
+ {
+       start_thread_common(regs, pc);
+       regs->pstate = PSR_MODE_EL0t;
++
++      if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
++              regs->pstate |= PSR_SSBS_BIT;
++
+       regs->sp = sp;
+ }
+@@ -198,6 +202,9 @@ static inline void compat_start_thread(s
+       regs->pstate |= PSR_AA32_E_BIT;
+ #endif
++      if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
++              regs->pstate |= PSR_AA32_SSBS_BIT;
++
+       regs->compat_sp = sp;
+ }
+ #endif
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -50,6 +50,7 @@
+ #define PSR_AA32_I_BIT                0x00000080
+ #define PSR_AA32_A_BIT                0x00000100
+ #define PSR_AA32_E_BIT                0x00000200
++#define PSR_AA32_SSBS_BIT     0x00800000
+ #define PSR_AA32_DIT_BIT      0x01000000
+ #define PSR_AA32_Q_BIT                0x08000000
+ #define PSR_AA32_V_BIT                0x10000000
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -86,11 +86,14 @@
+ #define REG_PSTATE_PAN_IMM            sys_reg(0, 0, 4, 0, 4)
+ #define REG_PSTATE_UAO_IMM            sys_reg(0, 0, 4, 0, 3)
++#define REG_PSTATE_SSBS_IMM           sys_reg(0, 3, 4, 0, 1)
+ #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM |       \
+                                     (!!x)<<8 | 0x1f)
+ #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM |       \
+                                     (!!x)<<8 | 0x1f)
++#define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \
++                                     (!!x)<<8 | 0x1f)
+ #define SYS_DC_ISW                    sys_insn(1, 0, 7, 6, 2)
+ #define SYS_DC_CSW                    sys_insn(1, 0, 7, 10, 2)
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -46,6 +46,7 @@
+ #define PSR_I_BIT     0x00000080
+ #define PSR_A_BIT     0x00000100
+ #define PSR_D_BIT     0x00000200
++#define PSR_SSBS_BIT  0x00001000
+ #define PSR_PAN_BIT   0x00400000
+ #define PSR_UAO_BIT   0x00800000
+ #define PSR_V_BIT     0x10000000
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -312,6 +312,14 @@ void __init arm64_enable_wa2_handling(st
+ void arm64_set_ssbd_mitigation(bool state)
+ {
++      if (this_cpu_has_cap(ARM64_SSBS)) {
++              if (state)
++                      asm volatile(SET_PSTATE_SSBS(0));
++              else
++                      asm volatile(SET_PSTATE_SSBS(1));
++              return;
++      }
++
+       switch (psci_ops.conduit) {
+       case PSCI_CONDUIT_HVC:
+               arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
+@@ -336,6 +344,11 @@ static bool has_ssbd_mitigation(const st
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++      if (this_cpu_has_cap(ARM64_SSBS)) {
++              required = false;
++              goto out_printmsg;
++      }
++
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+               ssbd_state = ARM64_SSBD_UNKNOWN;
+               return false;
+@@ -384,7 +397,6 @@ static bool has_ssbd_mitigation(const st
+       switch (ssbd_state) {
+       case ARM64_SSBD_FORCE_DISABLE:
+-              pr_info_once("%s disabled from command-line\n", entry->desc);
+               arm64_set_ssbd_mitigation(false);
+               required = false;
+               break;
+@@ -397,7 +409,6 @@ static bool has_ssbd_mitigation(const st
+               break;
+       case ARM64_SSBD_FORCE_ENABLE:
+-              pr_info_once("%s forced from command-line\n", entry->desc);
+               arm64_set_ssbd_mitigation(true);
+               required = true;
+               break;
+@@ -407,6 +418,17 @@ static bool has_ssbd_mitigation(const st
+               break;
+       }
++out_printmsg:
++      switch (ssbd_state) {
++      case ARM64_SSBD_FORCE_DISABLE:
++              pr_info_once("%s disabled from command-line\n", entry->desc);
++              break;
++
++      case ARM64_SSBD_FORCE_ENABLE:
++              pr_info_once("%s forced from command-line\n", entry->desc);
++              break;
++      }
++
+       return required;
+ }
+ #endif        /* CONFIG_ARM64_SSBD */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1071,6 +1071,48 @@ static void cpu_has_fwb(const struct arm
+       WARN_ON(val & (7 << 27 | 7 << 21));
+ }
++#ifdef CONFIG_ARM64_SSBD
++static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
++{
++      if (user_mode(regs))
++              return 1;
++
++      if (instr & BIT(CRm_shift))
++              regs->pstate |= PSR_SSBS_BIT;
++      else
++              regs->pstate &= ~PSR_SSBS_BIT;
++
++      arm64_skip_faulting_instruction(regs, 4);
++      return 0;
++}
++
++static struct undef_hook ssbs_emulation_hook = {
++      .instr_mask     = ~(1U << CRm_shift),
++      .instr_val      = 0xd500001f | REG_PSTATE_SSBS_IMM,
++      .fn             = ssbs_emulation_handler,
++};
++
++static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
++{
++      static bool undef_hook_registered = false;
++      static DEFINE_SPINLOCK(hook_lock);
++
++      spin_lock(&hook_lock);
++      if (!undef_hook_registered) {
++              register_undef_hook(&ssbs_emulation_hook);
++              undef_hook_registered = true;
++      }
++      spin_unlock(&hook_lock);
++
++      if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
++              sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
++              arm64_set_ssbd_mitigation(false);
++      } else {
++              arm64_set_ssbd_mitigation(true);
++      }
++}
++#endif /* CONFIG_ARM64_SSBD */
++
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+       {
+               .desc = "GIC system register CPU interface",
+@@ -1258,6 +1300,7 @@ static const struct arm64_cpu_capabiliti
+               .cpu_enable = cpu_enable_hw_dbm,
+       },
+ #endif
++#ifdef CONFIG_ARM64_SSBD
+       {
+               .desc = "Speculative Store Bypassing Safe (SSBS)",
+               .capability = ARM64_SSBS,
+@@ -1267,7 +1310,9 @@ static const struct arm64_cpu_capabiliti
+               .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+               .sign = FTR_UNSIGNED,
+               .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
++              .cpu_enable = cpu_enable_ssbs,
+       },
++#endif
+       {},
+ };
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -358,6 +358,10 @@ int copy_thread(unsigned long clone_flag
+               if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+                   cpus_have_const_cap(ARM64_HAS_UAO))
+                       childregs->pstate |= PSR_UAO_BIT;
++
++              if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
++                      childregs->pstate |= PSR_SSBS_BIT;
++
+               p->thread.cpu_context.x19 = stack_start;
+               p->thread.cpu_context.x20 = stk_sz;
+       }
+--- a/arch/arm64/kernel/ssbd.c
++++ b/arch/arm64/kernel/ssbd.c
+@@ -3,13 +3,31 @@
+  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+  */
++#include <linux/compat.h>
+ #include <linux/errno.h>
+ #include <linux/prctl.h>
+ #include <linux/sched.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/thread_info.h>
+ #include <asm/cpufeature.h>
++static void ssbd_ssbs_enable(struct task_struct *task)
++{
++      u64 val = is_compat_thread(task_thread_info(task)) ?
++                PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
++
++      task_pt_regs(task)->pstate |= val;
++}
++
++static void ssbd_ssbs_disable(struct task_struct *task)
++{
++      u64 val = is_compat_thread(task_thread_info(task)) ?
++                PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
++
++      task_pt_regs(task)->pstate &= ~val;
++}
++
+ /*
+  * prctl interface for SSBD
+  * FIXME: Drop the below ifdefery once merged in 4.18.
+@@ -47,12 +65,14 @@ static int ssbd_prctl_set(struct task_st
+                       return -EPERM;
+               task_clear_spec_ssb_disable(task);
+               clear_tsk_thread_flag(task, TIF_SSBD);
++              ssbd_ssbs_enable(task);
+               break;
+       case PR_SPEC_DISABLE:
+               if (state == ARM64_SSBD_FORCE_DISABLE)
+                       return -EPERM;
+               task_set_spec_ssb_disable(task);
+               set_tsk_thread_flag(task, TIF_SSBD);
++              ssbd_ssbs_disable(task);
+               break;
+       case PR_SPEC_FORCE_DISABLE:
+               if (state == ARM64_SSBD_FORCE_DISABLE)
+@@ -60,6 +80,7 @@ static int ssbd_prctl_set(struct task_st
+               task_set_spec_ssb_disable(task);
+               task_set_spec_ssb_force_disable(task);
+               set_tsk_thread_flag(task, TIF_SSBD);
++              ssbd_ssbs_disable(task);
+               break;
+       default:
+               return -ERANGE;
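
The ssbd_prctl_set() path extended above is driven from userspace through the speculation-control prctl interface. For reference, a minimal caller might look like the sketch below (it assumes a toolchain whose <linux/prctl.h> already carries PR_SET_SPECULATION_CTRL and the PR_SPEC_* flags):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Request that speculative store bypass be disabled for this task. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* The return value of the GET call encodes PR_SPEC_* flags. */
	printf("spec_store_bypass state: 0x%x\n",
	       (unsigned)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			       0, 0, 0));
	return 0;
}

With the hunk above applied, PR_SPEC_DISABLE also clears PSTATE.SSBS for the task via ssbd_ssbs_disable(), in addition to setting TIF_SSBD for the firmware-backed mitigation.
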
diff --git a/queue-4.19/arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch b/queue-4.19/arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch
new file mode 100644
index 0000000..9b0e811
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Wed 09 Oct 2019 10:02:12 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:29 +0200
+Subject: arm64: ssbs: Don't treat CPUs with SSBS as unaffected by SSB
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Will Deacon <will.deacon@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-16-ard.biesheuvel@linaro.org>
+
+From: Will Deacon <will.deacon@arm.com>
+
+[ Upstream commit eb337cdfcd5dd3b10522c2f34140a73a4c285c30 ]
+
+SSBS provides a relatively cheap mitigation for SSB, but it is still a
+mitigation and its presence does not indicate that the CPU is unaffected
+by the vulnerability.
+
+Tweak the mitigation logic so that we report the correct string in sysfs.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -341,15 +341,17 @@ static bool has_ssbd_mitigation(const st
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++      /* delay setting __ssb_safe until we get a firmware response */
++      if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
++              this_cpu_safe = true;
++
+       if (this_cpu_has_cap(ARM64_SSBS)) {
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               required = false;
+               goto out_printmsg;
+       }
+-      /* delay setting __ssb_safe until we get a firmware response */
+-      if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+-              this_cpu_safe = true;
+-
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+               ssbd_state = ARM64_SSBD_UNKNOWN;
+               if (!this_cpu_safe)
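
The string this change corrects is the one exported by the generic CPU vulnerabilities framework enabled elsewhere in the series. A small sketch of reading it from userspace (the path is the standard spec_store_bypass sysfs entry):

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

	if (!f) {
		perror("spec_store_bypass");
		return 1;
	}
	/* Typical contents: "Not affected", "Vulnerable", or a "Mitigation: ..." string. */
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}

On CPUs that implement SSBS, the entry should now report a mitigation rather than "Not affected".
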
diff --git a/queue-4.19/kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch b/queue-4.19/kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch
new file mode 100644
index 0000000..48c6d2e
--- /dev/null
@@ -0,0 +1,69 @@
+From foo@baz Wed 09 Oct 2019 10:02:11 AM CEST
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue,  8 Oct 2019 17:39:17 +0200
+Subject: KVM: arm64: Set SCTLR_EL2.DSSBS if SSBD is forcefully disabled and !vhe
+To: linux-arm-kernel@lists.infradead.org
+Cc: stable@vger.kernel.org, Will Deacon <will.deacon@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, Catalin Marinas <catalin.marinas@arm.com>, Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Message-ID: <20191008153930.15386-4-ard.biesheuvel@linaro.org>
+
+From: Will Deacon <will.deacon@arm.com>
+
+[ Upstream commit 7c36447ae5a090729e7b129f24705bb231a07e0b ]
+
+When running without VHE, it is necessary to set SCTLR_EL2.DSSBS if SSBD
+has been forcefully disabled on the kernel command-line.
+
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h |   11 +++++++++++
+ arch/arm64/kvm/hyp/sysreg-sr.c    |   11 +++++++++++
+ 2 files changed, 22 insertions(+)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -398,6 +398,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struc
+ DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
++void __kvm_enable_ssbs(void);
++
+ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+                                      unsigned long hyp_stack_ptr,
+                                      unsigned long vector_ptr)
+@@ -418,6 +420,15 @@ static inline void __cpu_init_hyp_mode(p
+        */
+       BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+       __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
++
++      /*
++       * Disabling SSBD on a non-VHE system requires us to enable SSBS
++       * at EL2.
++       */
++      if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
++          arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
++              kvm_call_hyp(__kvm_enable_ssbs);
++      }
+ }
+ static inline bool kvm_arch_check_sve_has_vhe(void)
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -293,3 +293,14 @@ void kvm_vcpu_put_sysregs(struct kvm_vcp
+       vcpu->arch.sysregs_loaded_on_cpu = false;
+ }
++
++void __hyp_text __kvm_enable_ssbs(void)
++{
++      u64 tmp;
++
++      asm volatile(
++      "mrs    %0, sctlr_el2\n"
++      "orr    %0, %0, %1\n"
++      "msr    sctlr_el2, %0"
++      : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
++}
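
The this_cpu_has_cap(ARM64_SSBS) test above is the in-kernel view of the same feature that the queued documentation patch exposes to userspace as a hwcap. A minimal detection sketch, with the bit value taken from the arm64 uapi hwcap header and guarded in case older headers lack it:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SSBS
#define HWCAP_SSBS	(1UL << 28)	/* bit per arch/arm64/include/uapi/asm/hwcap.h */
#endif

int main(void)
{
	/* AT_HWCAP carries the feature bits the kernel advertises to userspace. */
	if (getauxval(AT_HWCAP) & HWCAP_SSBS)
		printf("PSTATE.SSBS is available\n");
	else
		printf("no SSBS: SSB mitigation stays on the SMCCC/firmware path\n");
	return 0;
}
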
diff --git a/queue-4.19/series b/queue-4.19/series
index 9432a37f0591e74e98504460028cfd35f6b3592c..cfb207662302225a869d9e49e8a87be75026ee64 100644
@@ -86,3 +86,18 @@ perf-tools-fix-segfault-in-cpu_cache_level__read.patch
 perf-stat-reset-previous-counts-on-repeat-with-inter.patch
 drm-i915-userptr-acquire-the-page-lock-around-set_pa.patch
 riscv-avoid-interrupts-being-erroneously-enabled-in-.patch
+arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch
+kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch
+arm64-docs-document-ssbs-hwcap.patch
+arm64-fix-ssbs-sanitization.patch
+arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch
+arm64-add-sysfs-vulnerability-show-for-meltdown.patch
+arm64-enable-generic-cpu-vulnerabilites-support.patch
+arm64-always-enable-ssb-vulnerability-detection.patch
+arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch
+arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch
+arm64-always-enable-spectre-v2-vulnerability-detection.patch
+arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch
+arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch
+arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch
+arm64-force-ssbs-on-context-switch.patch