git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Sun, 27 Oct 2019 08:55:04 +0000 (09:55 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    Sun, 27 Oct 2019 08:55:04 +0000 (09:55 +0100)
added patches:
arm64-add-helpers-for-checking-cpu-midr-against-a-range.patch
arm64-add-midr-encoding-for-arm-cortex-a55-and-cortex-a35.patch
arm64-add-psr_aa32_-definitions.patch
arm64-add-sysfs-vulnerability-show-for-meltdown.patch
arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch
arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch
arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch
arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch
arm64-always-enable-spectre-v2-vulnerability-detection.patch
arm64-always-enable-ssb-vulnerability-detection.patch
arm64-capabilities-add-flags-to-handle-the-conflicts-on-late-cpu.patch
arm64-capabilities-add-support-for-checks-based-on-a-list-of-midrs.patch
arm64-capabilities-add-support-for-features-enabled-early.patch
arm64-capabilities-allow-features-based-on-local-cpu-scope.patch
arm64-capabilities-change-scope-of-vhe-to-boot-cpu-feature.patch
arm64-capabilities-clean-up-midr-range-helpers.patch
arm64-capabilities-filter-the-entries-based-on-a-given-mask.patch
arm64-capabilities-group-handling-of-features-and-errata-workarounds.patch
arm64-capabilities-introduce-weak-features-based-on-local-cpu.patch
arm64-capabilities-move-errata-processing-code.patch
arm64-capabilities-move-errata-work-around-check-on-boot-cpu.patch
arm64-capabilities-prepare-for-fine-grained-capabilities.patch
arm64-capabilities-prepare-for-grouping-features-and-errata-work-arounds.patch
arm64-capabilities-restrict-kpti-detection-to-boot-time-cpus.patch
arm64-capabilities-split-the-processing-of-errata-work-arounds.patch
arm64-capabilities-unify-the-verification.patch
arm64-capabilities-update-prototype-for-enable-call-back.patch
arm64-cpufeature-detect-ssbs-and-advertise-to-userspace.patch
arm64-documentation-cpu-feature-registers-remove-res0-fields.patch
arm64-don-t-zero-dit-on-signal-return.patch
arm64-enable-generic-cpu-vulnerabilites-support.patch
arm64-expose-arm-v8.4-features.patch
arm64-expose-support-for-optional-armv8-a-features.patch
arm64-fix-ssbs-sanitization.patch
arm64-fix-the-feature-type-for-id-register-fields.patch
arm64-force-ssbs-on-context-switch.patch
arm64-get-rid-of-__smccc_workaround_1_hvc_.patch
arm64-introduce-sysreg_clear_set.patch
arm64-kvm-use-smccc_arch_workaround_1-for-falkor-bp-hardening.patch
arm64-move-sctlr_el-1-2-assertions-to-asm-sysreg.h.patch
arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch
arm64-speculation-support-mitigations-cmdline-option.patch
arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch
arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch
arm64-sysreg-move-to-use-definitions-for-all-the-sctlr-bits.patch
arm64-use-firmware-to-detect-cpus-that-are-not-affected-by-spectre-v2.patch
arm64-v8.4-support-for-new-floating-point-multiplication-instructions.patch
asoc-rsnd-reinitialize-bit-clock-inversion-flag-for-every-format-setting.patch
cfg80211-wext-avoid-copying-malformed-ssids.patch
drivers-base-memory.c-don-t-access-uninitialized-memmaps-in-soft_offline_page_store.patch
drm-amdgpu-bail-earlier-when-amdgpu.cik_-si_support-is-not-set-to-1.patch
drm-edid-add-6-bpc-quirk-for-sdc-panel-in-lenovo-g50.patch
fs-proc-page.c-don-t-access-uninitialized-memmaps-in-fs-proc-page.c.patch
input-da9063-fix-capability-and-drop-key_sleep.patch
input-synaptics-rmi4-avoid-processing-unknown-irqs.patch
kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch
mac80211-reject-malformed-ssid-elements.patch
mips-tlbex-fix-build_restore_pagemask-kscratch-restore.patch
scsi-ch-make-it-possible-to-open-a-ch-device-multiple-times-again.patch
scsi-core-save-restore-command-resid-for-error-handling.patch
scsi-core-try-to-get-module-before-removing-device.patch
scsi-sd-ignore-a-failure-to-sync-cache-due-to-lack-of-authorization.patch
staging-wlan-ng-fix-exit-return-when-sme-key_idx-num_wepkeys.patch

64 files changed:
queue-4.14/arm64-add-helpers-for-checking-cpu-midr-against-a-range.patch [new file with mode: 0644]
queue-4.14/arm64-add-midr-encoding-for-arm-cortex-a55-and-cortex-a35.patch [new file with mode: 0644]
queue-4.14/arm64-add-psr_aa32_-definitions.patch [new file with mode: 0644]
queue-4.14/arm64-add-sysfs-vulnerability-show-for-meltdown.patch [new file with mode: 0644]
queue-4.14/arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch [new file with mode: 0644]
queue-4.14/arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch [new file with mode: 0644]
queue-4.14/arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch [new file with mode: 0644]
queue-4.14/arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch [new file with mode: 0644]
queue-4.14/arm64-always-enable-spectre-v2-vulnerability-detection.patch [new file with mode: 0644]
queue-4.14/arm64-always-enable-ssb-vulnerability-detection.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-add-flags-to-handle-the-conflicts-on-late-cpu.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-add-support-for-checks-based-on-a-list-of-midrs.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-add-support-for-features-enabled-early.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-allow-features-based-on-local-cpu-scope.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-change-scope-of-vhe-to-boot-cpu-feature.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-clean-up-midr-range-helpers.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-filter-the-entries-based-on-a-given-mask.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-group-handling-of-features-and-errata-workarounds.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-introduce-weak-features-based-on-local-cpu.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-move-errata-processing-code.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-move-errata-work-around-check-on-boot-cpu.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-prepare-for-fine-grained-capabilities.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-prepare-for-grouping-features-and-errata-work-arounds.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-restrict-kpti-detection-to-boot-time-cpus.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-split-the-processing-of-errata-work-arounds.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-unify-the-verification.patch [new file with mode: 0644]
queue-4.14/arm64-capabilities-update-prototype-for-enable-call-back.patch [new file with mode: 0644]
queue-4.14/arm64-cpufeature-detect-ssbs-and-advertise-to-userspace.patch [new file with mode: 0644]
queue-4.14/arm64-documentation-cpu-feature-registers-remove-res0-fields.patch [new file with mode: 0644]
queue-4.14/arm64-don-t-zero-dit-on-signal-return.patch [new file with mode: 0644]
queue-4.14/arm64-enable-generic-cpu-vulnerabilites-support.patch [new file with mode: 0644]
queue-4.14/arm64-expose-arm-v8.4-features.patch [new file with mode: 0644]
queue-4.14/arm64-expose-support-for-optional-armv8-a-features.patch [new file with mode: 0644]
queue-4.14/arm64-fix-ssbs-sanitization.patch [new file with mode: 0644]
queue-4.14/arm64-fix-the-feature-type-for-id-register-fields.patch [new file with mode: 0644]
queue-4.14/arm64-force-ssbs-on-context-switch.patch [new file with mode: 0644]
queue-4.14/arm64-get-rid-of-__smccc_workaround_1_hvc_.patch [new file with mode: 0644]
queue-4.14/arm64-introduce-sysreg_clear_set.patch [new file with mode: 0644]
queue-4.14/arm64-kvm-use-smccc_arch_workaround_1-for-falkor-bp-hardening.patch [new file with mode: 0644]
queue-4.14/arm64-move-sctlr_el-1-2-assertions-to-asm-sysreg.h.patch [new file with mode: 0644]
queue-4.14/arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch [new file with mode: 0644]
queue-4.14/arm64-speculation-support-mitigations-cmdline-option.patch [new file with mode: 0644]
queue-4.14/arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch [new file with mode: 0644]
queue-4.14/arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch [new file with mode: 0644]
queue-4.14/arm64-sysreg-move-to-use-definitions-for-all-the-sctlr-bits.patch [new file with mode: 0644]
queue-4.14/arm64-use-firmware-to-detect-cpus-that-are-not-affected-by-spectre-v2.patch [new file with mode: 0644]
queue-4.14/arm64-v8.4-support-for-new-floating-point-multiplication-instructions.patch [new file with mode: 0644]
queue-4.14/asoc-rsnd-reinitialize-bit-clock-inversion-flag-for-every-format-setting.patch [new file with mode: 0644]
queue-4.14/cfg80211-wext-avoid-copying-malformed-ssids.patch [new file with mode: 0644]
queue-4.14/drivers-base-memory.c-don-t-access-uninitialized-memmaps-in-soft_offline_page_store.patch [new file with mode: 0644]
queue-4.14/drm-amdgpu-bail-earlier-when-amdgpu.cik_-si_support-is-not-set-to-1.patch [new file with mode: 0644]
queue-4.14/drm-edid-add-6-bpc-quirk-for-sdc-panel-in-lenovo-g50.patch [new file with mode: 0644]
queue-4.14/fs-proc-page.c-don-t-access-uninitialized-memmaps-in-fs-proc-page.c.patch [new file with mode: 0644]
queue-4.14/input-da9063-fix-capability-and-drop-key_sleep.patch [new file with mode: 0644]
queue-4.14/input-synaptics-rmi4-avoid-processing-unknown-irqs.patch [new file with mode: 0644]
queue-4.14/kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch [new file with mode: 0644]
queue-4.14/mac80211-reject-malformed-ssid-elements.patch [new file with mode: 0644]
queue-4.14/mips-tlbex-fix-build_restore_pagemask-kscratch-restore.patch [new file with mode: 0644]
queue-4.14/scsi-ch-make-it-possible-to-open-a-ch-device-multiple-times-again.patch [new file with mode: 0644]
queue-4.14/scsi-core-save-restore-command-resid-for-error-handling.patch [new file with mode: 0644]
queue-4.14/scsi-core-try-to-get-module-before-removing-device.patch [new file with mode: 0644]
queue-4.14/scsi-sd-ignore-a-failure-to-sync-cache-due-to-lack-of-authorization.patch [new file with mode: 0644]
queue-4.14/series
queue-4.14/staging-wlan-ng-fix-exit-return-when-sme-key_idx-num_wepkeys.patch [new file with mode: 0644]

diff --git a/queue-4.14/arm64-add-helpers-for-checking-cpu-midr-against-a-range.patch b/queue-4.14/arm64-add-helpers-for-checking-cpu-midr-against-a-range.patch
new file mode 100644 (file)
index 0000000..b0c828f
--- /dev/null
@@ -0,0 +1,140 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:11 +0200
+Subject: arm64: Add helpers for checking CPU MIDR against a range
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-27-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 1df310505d6d544802016f6bae49aab836ae8510 ]
+
+Add helpers for checking if the given CPU midr falls in a range
+of variants/revisions for a given model.
+
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    4 ++--
+ arch/arm64/include/asm/cputype.h    |   30 ++++++++++++++++++++++++++++++
+ arch/arm64/kernel/cpu_errata.c      |   18 +++++++-----------
+ 3 files changed, 39 insertions(+), 13 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -10,6 +10,7 @@
+ #define __ASM_CPUFEATURE_H
+ #include <asm/cpucaps.h>
++#include <asm/cputype.h>
+ #include <asm/hwcap.h>
+ #include <asm/sysreg.h>
+@@ -302,8 +303,7 @@ struct arm64_cpu_capabilities {
+       void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
+       union {
+               struct {        /* To be used for erratum handling only */
+-                      u32 midr_model;
+-                      u32 midr_range_min, midr_range_max;
++                      struct midr_range midr_range;
+               };
+               struct {        /* Feature register checking */
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -126,6 +126,36 @@
+ #define read_cpuid(reg)                       read_sysreg_s(SYS_ ## reg)
+ /*
++ * Represent a range of MIDR values for a given CPU model and a
++ * range of variant/revision values.
++ *
++ * @model     - CPU model as defined by MIDR_CPU_MODEL
++ * @rv_min    - Minimum value for the revision/variant as defined by
++ *              MIDR_CPU_VAR_REV
++ * @rv_max    - Maximum value for the variant/revision for the range.
++ */
++struct midr_range {
++      u32 model;
++      u32 rv_min;
++      u32 rv_max;
++};
++
++#define MIDR_RANGE(m, v_min, r_min, v_max, r_max)             \
++      {                                                       \
++              .model = m,                                     \
++              .rv_min = MIDR_CPU_VAR_REV(v_min, r_min),       \
++              .rv_max = MIDR_CPU_VAR_REV(v_max, r_max),       \
++      }
++
++#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
++
++static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
++{
++      return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
++                               range->rv_min, range->rv_max);
++}
++
++/*
+  * The CPU ID never changes at run time, so we might as well tell the
+  * compiler that it's constant.  Use this function to read the CPU ID
+  * rather than directly reading processor_id or read_cpuid() directly.
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -26,10 +26,10 @@
+ static bool __maybe_unused
+ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+ {
++      u32 midr = read_cpuid_id();
++
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+-      return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
+-                                     entry->midr_range_min,
+-                                     entry->midr_range_max);
++      return is_midr_in_range(midr, &entry->midr_range);
+ }
+ static bool __maybe_unused
+@@ -43,7 +43,7 @@ is_kryo_midr(const struct arm64_cpu_capa
+       model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
+                MIDR_ARCHITECTURE_MASK;
+-      return model == entry->midr_model;
++      return model == entry->midr_range.model;
+ }
+ static bool
+@@ -407,15 +407,11 @@ static bool has_ssbd_mitigation(const st
+ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)     \
+       .matches = is_affected_midr_range,                      \
+-      .midr_model = model,                                    \
+-      .midr_range_min = MIDR_CPU_VAR_REV(v_min, r_min),       \
+-      .midr_range_max = MIDR_CPU_VAR_REV(v_max, r_max)
++      .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+ #define CAP_MIDR_ALL_VERSIONS(model)                                  \
+       .matches = is_affected_midr_range,                              \
+-      .midr_model = model,                                            \
+-      .midr_range_min = MIDR_CPU_VAR_REV(0, 0),                       \
+-      .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
++      .midr_range = MIDR_ALL_VERSIONS(model)
+ #define MIDR_FIXED(rev, revidr_mask) \
+       .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
+@@ -556,7 +552,7 @@ const struct arm64_cpu_capabilities arm6
+               .desc = "Qualcomm Technologies Kryo erratum 1003",
+               .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+-              .midr_model = MIDR_QCOM_KRYO,
++              .midr_range.model = MIDR_QCOM_KRYO,
+               .matches = is_kryo_midr,
+       },
+ #endif
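For illustration, a minimal sketch of how the helpers introduced above are
meant to be used; the erratum name and the variant/revision bounds below are
hypothetical, while MIDR_RANGE(), is_midr_in_range() and read_cpuid_id() are
the interfaces added or used by this patch:

    /* Match Cortex-A72 r0p0 .. r1p2 (illustrative bounds only) */
    static const struct midr_range example_erratum_range =
            MIDR_RANGE(MIDR_CORTEX_A72, 0, 0, 1, 2);

    static bool cpu_has_example_erratum(void)
    {
            return is_midr_in_range(read_cpuid_id(), &example_erratum_range);
    }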
diff --git a/queue-4.14/arm64-add-midr-encoding-for-arm-cortex-a55-and-cortex-a35.patch b/queue-4.14/arm64-add-midr-encoding-for-arm-cortex-a55-and-cortex-a35.patch
new file mode 100644 (file)
index 0000000..d11152a
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:12 +0200
+Subject: arm64: Add MIDR encoding for Arm Cortex-A55 and Cortex-A35
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-28-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 6e616864f21160d8d503523b60a53a29cecc6f24 ]
+
+Update the MIDR encodings for the Cortex-A55 and Cortex-A35.
+
+Cc: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cputype.h |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -85,6 +85,8 @@
+ #define ARM_CPU_PART_CORTEX_A53               0xD03
+ #define ARM_CPU_PART_CORTEX_A73               0xD09
+ #define ARM_CPU_PART_CORTEX_A75               0xD0A
++#define ARM_CPU_PART_CORTEX_A35               0xD04
++#define ARM_CPU_PART_CORTEX_A55               0xD05
+ #define APM_CPU_PART_POTENZA          0x000
+@@ -108,6 +110,8 @@
+ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+ #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+ #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
++#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
++#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
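As a hint at how these encodings get consumed, later patches in this series
build MIDR whitelists from them; a minimal sketch using the constants defined
above (the list name is illustrative):

    /* Cores known to be unaffected, matched at any variant/revision */
    static const struct midr_range example_safe_list[] = {
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
            { /* sentinel */ }
    };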
diff --git a/queue-4.14/arm64-add-psr_aa32_-definitions.patch b/queue-4.14/arm64-add-psr_aa32_-definitions.patch
new file mode 100644 (file)
index 0000000..486c746
--- /dev/null
@@ -0,0 +1,123 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:53 +0200
+Subject: arm64: add PSR_AA32_* definitions
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Christoffer Dall <christoffer.dall@arm.com>, Marc Zyngier <marc.zyngier@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-9-ard.biesheuvel@linaro.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 25086263425641c74123f9387426c23072b299ea ]
+
+The AArch32 CPSR/SPSR format is *almost* identical to the AArch64
+SPSR_ELx format for exceptions taken from AArch32, but the two have
+diverged with the addition of DIT, and we need to treat the two as
+logically distinct.
+
+This patch adds new definitions for the SPSR_ELx format for exceptions
+taken from AArch32, with a consistent PSR_AA32_ prefix. The existing
+COMPAT_PSR_ definitions will be used for the PSR format as seen from
+AArch32.
+
+Definitions of DIT are provided for both, and inline functions are
+provided to map between the two formats. Note that for SPSR_ELx, the
+(RES0) J bit has been re-allocated as the DIT bit.
+
+Once users of the COMPAT_PSR definitions have been migrated over to the
+PSR_AA32 definitions, (the majority of) the former will be removed, so
+no effort is made to avoid duplication until then.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Christoffer Dall <christoffer.dall@arm.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Suzuki Poulose <suzuki.poulose@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/ptrace.h |   57 +++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 56 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -35,7 +35,37 @@
+ #define COMPAT_PTRACE_GETHBPREGS      29
+ #define COMPAT_PTRACE_SETHBPREGS      30
+-/* AArch32 CPSR bits */
++/* SPSR_ELx bits for exceptions taken from AArch32 */
++#define PSR_AA32_MODE_MASK    0x0000001f
++#define PSR_AA32_MODE_USR     0x00000010
++#define PSR_AA32_MODE_FIQ     0x00000011
++#define PSR_AA32_MODE_IRQ     0x00000012
++#define PSR_AA32_MODE_SVC     0x00000013
++#define PSR_AA32_MODE_ABT     0x00000017
++#define PSR_AA32_MODE_HYP     0x0000001a
++#define PSR_AA32_MODE_UND     0x0000001b
++#define PSR_AA32_MODE_SYS     0x0000001f
++#define PSR_AA32_T_BIT                0x00000020
++#define PSR_AA32_F_BIT                0x00000040
++#define PSR_AA32_I_BIT                0x00000080
++#define PSR_AA32_A_BIT                0x00000100
++#define PSR_AA32_E_BIT                0x00000200
++#define PSR_AA32_DIT_BIT      0x01000000
++#define PSR_AA32_Q_BIT                0x08000000
++#define PSR_AA32_V_BIT                0x10000000
++#define PSR_AA32_C_BIT                0x20000000
++#define PSR_AA32_Z_BIT                0x40000000
++#define PSR_AA32_N_BIT                0x80000000
++#define PSR_AA32_IT_MASK      0x0600fc00      /* If-Then execution state mask */
++#define PSR_AA32_GE_MASK      0x000f0000
++
++#ifdef CONFIG_CPU_BIG_ENDIAN
++#define PSR_AA32_ENDSTATE     PSR_AA32_E_BIT
++#else
++#define PSR_AA32_ENDSTATE     0
++#endif
++
++/* AArch32 CPSR bits, as seen in AArch32 */
+ #define COMPAT_PSR_MODE_MASK  0x0000001f
+ #define COMPAT_PSR_MODE_USR   0x00000010
+ #define COMPAT_PSR_MODE_FIQ   0x00000011
+@@ -50,6 +80,7 @@
+ #define COMPAT_PSR_I_BIT      0x00000080
+ #define COMPAT_PSR_A_BIT      0x00000100
+ #define COMPAT_PSR_E_BIT      0x00000200
++#define COMPAT_PSR_DIT_BIT    0x00200000
+ #define COMPAT_PSR_J_BIT      0x01000000
+ #define COMPAT_PSR_Q_BIT      0x08000000
+ #define COMPAT_PSR_V_BIT      0x10000000
+@@ -111,6 +142,30 @@
+ #define compat_sp_fiq regs[29]
+ #define compat_lr_fiq regs[30]
++static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
++{
++      unsigned long pstate;
++
++      pstate = psr & ~COMPAT_PSR_DIT_BIT;
++
++      if (psr & COMPAT_PSR_DIT_BIT)
++              pstate |= PSR_AA32_DIT_BIT;
++
++      return pstate;
++}
++
++static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
++{
++      unsigned long psr;
++
++      psr = pstate & ~PSR_AA32_DIT_BIT;
++
++      if (pstate & PSR_AA32_DIT_BIT)
++              psr |= COMPAT_PSR_DIT_BIT;
++
++      return psr;
++}
++
+ /*
+  * This struct defines the way the registers are stored on the stack during an
+  * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
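The only behavioural difference the conversion helpers above have to handle is
the DIT bit, which sits at bit 21 in the AArch32 CPSR view but at bit 24 in
the SPSR_ELx view. A standalone sketch of the mapping, with the two constants
copied from the patch so it can be compiled outside the kernel:

    #include <stdio.h>

    #define COMPAT_PSR_DIT_BIT  0x00200000UL  /* DIT as seen from AArch32 */
    #define PSR_AA32_DIT_BIT    0x01000000UL  /* DIT in the SPSR_ELx format */

    static unsigned long compat_psr_to_pstate(unsigned long psr)
    {
            unsigned long pstate = psr & ~COMPAT_PSR_DIT_BIT;

            if (psr & COMPAT_PSR_DIT_BIT)
                    pstate |= PSR_AA32_DIT_BIT;
            return pstate;
    }

    int main(void)
    {
            unsigned long psr = 0x00200010UL;  /* USR mode with DIT set */

            /* prints 0x01000010: DIT relocated from bit 21 to bit 24 */
            printf("0x%08lx\n", compat_psr_to_pstate(psr));
            return 0;
    }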
diff --git a/queue-4.14/arm64-add-sysfs-vulnerability-show-for-meltdown.patch b/queue-4.14/arm64-add-sysfs-vulnerability-show-for-meltdown.patch
new file mode 100644 (file)
index 0000000..445dd89
--- /dev/null
@@ -0,0 +1,149 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:22 +0200
+Subject: arm64: add sysfs vulnerability show for meltdown
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-38-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit 1b3ccf4be0e7be8c4bd8522066b6cbc92591e912 ]
+
+We implement page table isolation as a mitigation for meltdown.
+Report this to userspace via sysfs.
+
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   58 +++++++++++++++++++++++++++++++----------
+ 1 file changed, 44 insertions(+), 14 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -824,7 +824,7 @@ static bool has_no_fpsimd(const struct a
+                                       ID_AA64PFR0_FP_SHIFT) < 0;
+ }
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++static bool __meltdown_safe = true;
+ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+@@ -842,6 +842,16 @@ static bool unmap_kernel_at_el0(const st
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+       };
+       char const *str = "command line option";
++      bool meltdown_safe;
++
++      meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
++
++      /* Defer to CPU feature registers */
++      if (has_cpuid_feature(entry, scope))
++              meltdown_safe = true;
++
++      if (!meltdown_safe)
++              __meltdown_safe = false;
+       /*
+        * For reasons that aren't entirely clear, enabling KPTI on Cavium
+@@ -853,6 +863,19 @@ static bool unmap_kernel_at_el0(const st
+               __kpti_forced = -1;
+       }
++      /* Useful for KASLR robustness */
++      if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
++              if (!__kpti_forced) {
++                      str = "KASLR";
++                      __kpti_forced = 1;
++              }
++      }
++
++      if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
++              pr_info_once("kernel page table isolation disabled by kernel configuration\n");
++              return false;
++      }
++
+       /* Forced? */
+       if (__kpti_forced) {
+               pr_info_once("kernel page table isolation forced %s by %s\n",
+@@ -860,18 +883,10 @@ static bool unmap_kernel_at_el0(const st
+               return __kpti_forced > 0;
+       }
+-      /* Useful for KASLR robustness */
+-      if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+-              return true;
+-
+-      /* Don't force KPTI for CPUs that are not vulnerable */
+-      if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+-              return false;
+-
+-      /* Defer to CPU feature registers */
+-      return !has_cpuid_feature(entry, scope);
++      return !meltdown_safe;
+ }
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ static void
+ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+ {
+@@ -896,6 +911,12 @@ kpti_install_ng_mappings(const struct ar
+       return;
+ }
++#else
++static void
++kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
++{
++}
++#endif        /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ static int __init parse_kpti(char *str)
+ {
+@@ -909,7 +930,6 @@ static int __init parse_kpti(char *str)
+       return 0;
+ }
+ early_param("kpti", parse_kpti);
+-#endif        /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
+ {
+@@ -1056,7 +1076,6 @@ static const struct arm64_cpu_capabiliti
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = hyp_offset_low,
+       },
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       {
+               .desc = "Kernel page table isolation (KPTI)",
+               .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+@@ -1072,7 +1091,6 @@ static const struct arm64_cpu_capabiliti
+               .matches = unmap_kernel_at_el0,
+               .cpu_enable = kpti_install_ng_mappings,
+       },
+-#endif
+       {
+               /* FP/SIMD is not implemented */
+               .capability = ARM64_HAS_NO_FPSIMD,
+@@ -1629,3 +1647,15 @@ static int __init enable_mrs_emulation(v
+ }
+ core_initcall(enable_mrs_emulation);
++
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
++                        char *buf)
++{
++      if (__meltdown_safe)
++              return sprintf(buf, "Not affected\n");
++
++      if (arm64_kernel_unmapped_at_el0())
++              return sprintf(buf, "Mitigation: PTI\n");
++
++      return sprintf(buf, "Vulnerable\n");
++}
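A usage sketch, not part of the patch: once cpu_show_meltdown() is wired up,
the mitigation state is readable from userspace via the standard sysfs
attribute, and the returned string is one of the three literals above:

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/meltdown", "r");

            /* expect "Not affected", "Mitigation: PTI" or "Vulnerable" */
            if (f && fgets(buf, sizeof(buf), f))
                    fputs(buf, stdout);
            if (f)
                    fclose(f);
            return 0;
    }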
diff --git a/queue-4.14/arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch b/queue-4.14/arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch
new file mode 100644 (file)
index 0000000..1855a62
--- /dev/null
@@ -0,0 +1,40 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:21 +0200
+Subject: arm64: Add sysfs vulnerability show for spectre-v1
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Mian Yousaf Kaukab <ykaukab@suse.de>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-37-ard.biesheuvel@linaro.org>
+
+From: Mian Yousaf Kaukab <ykaukab@suse.de>
+
+[ Upstream commit 3891ebccace188af075ce143d8b072b65e90f695 ]
+
+spectre-v1 has been mitigated and the mitigation is always active.
+Report this to userspace via sysfs.
+
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Acked-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -638,3 +638,9 @@ const struct arm64_cpu_capabilities arm6
+       {
+       }
+ };
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
diff --git a/queue-4.14/arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch b/queue-4.14/arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch
new file mode 100644 (file)
index 0000000..a2b4f68
--- /dev/null
@@ -0,0 +1,92 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:28 +0200
+Subject: arm64: add sysfs vulnerability show for spectre-v2
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-44-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit d2532e27b5638bb2e2dd52b80b7ea2ec65135377 ]
+
+Track whether all the cores in the machine are vulnerable to Spectre-v2,
+and whether all the vulnerable cores have been mitigated. We then expose
+this information to userspace via sysfs.
+
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   27 ++++++++++++++++++++++++++-
+ 1 file changed, 26 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -456,6 +456,10 @@ out_printmsg:
+       .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
+       CAP_MIDR_RANGE_LIST(midr_list)
++/* Track overall mitigation state. We are only mitigated if all cores are ok */
++static bool __hardenbp_enab = true;
++static bool __spectrev2_safe = true;
++
+ /*
+  * List of CPUs that do not need any Spectre-v2 mitigation at all.
+  */
+@@ -466,6 +470,10 @@ static const struct midr_range spectre_v
+       { /* sentinel */ }
+ };
++/*
++ * Track overall bp hardening for all heterogeneous cores in the machine.
++ * We are only considered "safe" if all booted cores are known safe.
++ */
+ static bool __maybe_unused
+ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+ {
+@@ -487,6 +495,8 @@ check_branch_predictor(const struct arm6
+       if (!need_wa)
+               return false;
++      __spectrev2_safe = false;
++
+       if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+               pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+               __hardenbp_enab = false;
+@@ -496,11 +506,14 @@ check_branch_predictor(const struct arm6
+       /* forced off */
+       if (__nospectre_v2) {
+               pr_info_once("spectrev2 mitigation disabled by command line option\n");
++              __hardenbp_enab = false;
+               return false;
+       }
+-      if (need_wa < 0)
++      if (need_wa < 0) {
+               pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
++              __hardenbp_enab = false;
++      }
+       return (need_wa > 0);
+ }
+@@ -663,3 +676,15 @@ ssize_t cpu_show_spectre_v1(struct devic
+ {
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ }
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
++              char *buf)
++{
++      if (__spectrev2_safe)
++              return sprintf(buf, "Not affected\n");
++
++      if (__hardenbp_enab)
++              return sprintf(buf, "Mitigation: Branch predictor hardening\n");
++
++      return sprintf(buf, "Vulnerable\n");
++}
diff --git a/queue-4.14/arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch b/queue-4.14/arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch
new file mode 100644 (file)
index 0000000..b46e151
--- /dev/null
@@ -0,0 +1,149 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:29 +0200
+Subject: arm64: add sysfs vulnerability show for speculative store bypass
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-45-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit 526e065dbca6df0b5a130b84b836b8b3c9f54e21 ]
+
+Return status based on ssbd_state and __ssb_safe. If the
+mitigation is disabled, or the firmware isn't responding, then
+return the expected machine state based on a whitelist of
+known-good cores.
+
+Given a heterogeneous machine, the overall machine vulnerability
+defaults to safe but is reset to unsafe when we miss the whitelist
+and the firmware doesn't explicitly tell us the core is safe.
+In order to make that work we delay transitioning to vulnerable
+until we know the firmware isn't responding to avoid a case
+where we miss the whitelist, but the firmware goes ahead and
+reports the core is not vulnerable. If all the cores in the
+machine have SSBS, then __ssb_safe will remain true.
+
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   42 +++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -225,6 +225,7 @@ static int detect_harden_bp_fw(void)
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+ int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
++static bool __ssb_safe = true;
+ static const struct ssbd_options {
+       const char      *str;
+@@ -328,6 +329,7 @@ static bool has_ssbd_mitigation(const st
+       struct arm_smccc_res res;
+       bool required = true;
+       s32 val;
++      bool this_cpu_safe = false;
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+@@ -336,8 +338,14 @@ static bool has_ssbd_mitigation(const st
+               goto out_printmsg;
+       }
++      /* delay setting __ssb_safe until we get a firmware response */
++      if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
++              this_cpu_safe = true;
++
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+               ssbd_state = ARM64_SSBD_UNKNOWN;
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               return false;
+       }
+@@ -354,6 +362,8 @@ static bool has_ssbd_mitigation(const st
+       default:
+               ssbd_state = ARM64_SSBD_UNKNOWN;
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               return false;
+       }
+@@ -362,14 +372,18 @@ static bool has_ssbd_mitigation(const st
+       switch (val) {
+       case SMCCC_RET_NOT_SUPPORTED:
+               ssbd_state = ARM64_SSBD_UNKNOWN;
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               return false;
++      /* machines with mixed mitigation requirements must not return this */
+       case SMCCC_RET_NOT_REQUIRED:
+               pr_info_once("%s mitigation not required\n", entry->desc);
+               ssbd_state = ARM64_SSBD_MITIGATED;
+               return false;
+       case SMCCC_RET_SUCCESS:
++              __ssb_safe = false;
+               required = true;
+               break;
+@@ -379,6 +393,8 @@ static bool has_ssbd_mitigation(const st
+       default:
+               WARN_ON(1);
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               return false;
+       }
+@@ -419,6 +435,14 @@ out_printmsg:
+       return required;
+ }
++/* known invulnerable cores */
++static const struct midr_range arm64_ssb_cpus[] = {
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
++      {},
++};
++
+ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)     \
+       .matches = is_affected_midr_range,                      \
+       .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+@@ -666,6 +690,7 @@ const struct arm64_cpu_capabilities arm6
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .capability = ARM64_SSBD,
+               .matches = has_ssbd_mitigation,
++              .midr_range_list = arm64_ssb_cpus,
+       },
+       {
+       }
+@@ -688,3 +713,20 @@ ssize_t cpu_show_spectre_v2(struct devic
+       return sprintf(buf, "Vulnerable\n");
+ }
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev,
++              struct device_attribute *attr, char *buf)
++{
++      if (__ssb_safe)
++              return sprintf(buf, "Not affected\n");
++
++      switch (ssbd_state) {
++      case ARM64_SSBD_KERNEL:
++      case ARM64_SSBD_FORCE_ENABLE:
++              if (IS_ENABLED(CONFIG_ARM64_SSBD))
++                      return sprintf(buf,
++                          "Mitigation: Speculative Store Bypass disabled via prctl\n");
++      }
++
++      return sprintf(buf, "Vulnerable\n");
++}
diff --git a/queue-4.14/arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch b/queue-4.14/arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch
new file mode 100644 (file)
index 0000000..5d1b3ba
--- /dev/null
@@ -0,0 +1,223 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:26 +0200
+Subject: arm64: Advertise mitigation of Spectre-v2, or lack thereof
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Marc Zyngier <marc.zyngier@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-42-ard.biesheuvel@linaro.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit 73f38166095947f3b86b02fbed6bd592223a7ac8 ]
+
+We currently have a list of CPUs affected by Spectre-v2, for which
+we check that the firmware implements ARCH_WORKAROUND_1. It turns
+out that not all firmwares do implement the required mitigation,
+and that we fail to let the user know about it.
+
+Instead, let's slightly revamp our checks, and rely on a whitelist
+of cores that are known to be non-vulnerable, and let the user know
+the status of the mitigation in the kernel log.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |  108 +++++++++++++++++++++--------------------
+ 1 file changed, 56 insertions(+), 52 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -98,9 +98,9 @@ static void __copy_hyp_vect_bpi(int slot
+       flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+ }
+-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+-                                    const char *hyp_vecs_start,
+-                                    const char *hyp_vecs_end)
++static void install_bp_hardening_cb(bp_hardening_cb_t fn,
++                                  const char *hyp_vecs_start,
++                                  const char *hyp_vecs_end)
+ {
+       static int last_slot = -1;
+       static DEFINE_SPINLOCK(bp_lock);
+@@ -130,7 +130,7 @@ static void __install_bp_hardening_cb(bp
+ #define __smccc_workaround_1_smc_start                NULL
+ #define __smccc_workaround_1_smc_end          NULL
+-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
++static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                     const char *hyp_vecs_start,
+                                     const char *hyp_vecs_end)
+ {
+@@ -138,23 +138,6 @@ static void __install_bp_hardening_cb(bp
+ }
+ #endif        /* CONFIG_KVM */
+-static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+-                                   bp_hardening_cb_t fn,
+-                                   const char *hyp_vecs_start,
+-                                   const char *hyp_vecs_end)
+-{
+-      u64 pfr0;
+-
+-      if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+-              return;
+-
+-      pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+-      if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+-              return;
+-
+-      __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+-}
+-
+ #include <uapi/linux/psci.h>
+ #include <linux/arm-smccc.h>
+ #include <linux/psci.h>
+@@ -189,31 +172,27 @@ static int __init parse_nospectre_v2(cha
+ }
+ early_param("nospectre_v2", parse_nospectre_v2);
+-static void
+-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
++/*
++ * -1: No workaround
++ *  0: No workaround required
++ *  1: Workaround installed
++ */
++static int detect_harden_bp_fw(void)
+ {
+       bp_hardening_cb_t cb;
+       void *smccc_start, *smccc_end;
+       struct arm_smccc_res res;
+       u32 midr = read_cpuid_id();
+-      if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+-              return;
+-
+-      if (__nospectre_v2) {
+-              pr_info_once("spectrev2 mitigation disabled by command line option\n");
+-              return;
+-      }
+-
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+-              return;
++              return -1;
+       switch (psci_ops.conduit) {
+       case PSCI_CONDUIT_HVC:
+               arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+               if ((int)res.a0 < 0)
+-                      return;
++                      return -1;
+               cb = call_hvc_arch_workaround_1;
+               /* This is a guest, no need to patch KVM vectors */
+               smccc_start = NULL;
+@@ -224,23 +203,23 @@ enable_smccc_arch_workaround_1(const str
+               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+               if ((int)res.a0 < 0)
+-                      return;
++                      return -1;
+               cb = call_smc_arch_workaround_1;
+               smccc_start = __smccc_workaround_1_smc_start;
+               smccc_end = __smccc_workaround_1_smc_end;
+               break;
+       default:
+-              return;
++              return -1;
+       }
+       if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+           ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
+               cb = qcom_link_stack_sanitization;
+-      install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
++      install_bp_hardening_cb(cb, smccc_start, smccc_end);
+-      return;
++      return 1;
+ }
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+@@ -479,23 +458,48 @@ out_printmsg:
+       CAP_MIDR_RANGE_LIST(midr_list)
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+-
+ /*
+- * List of CPUs where we need to issue a psci call to
+- * harden the branch predictor.
++ * List of CPUs that do not need any Spectre-v2 mitigation at all.
+  */
+-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
+-      MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+-      MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+-      MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+-      MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+-      MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+-      MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+-      MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+-      MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+-      {},
++static const struct midr_range spectre_v2_safe_list[] = {
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
++      { /* sentinel */ }
+ };
++static bool __maybe_unused
++check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
++{
++      int need_wa;
++
++      WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++      /* If the CPU has CSV2 set, we're safe */
++      if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
++                                               ID_AA64PFR0_CSV2_SHIFT))
++              return false;
++
++      /* Alternatively, we have a list of unaffected CPUs */
++      if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
++              return false;
++
++      /* Fallback to firmware detection */
++      need_wa = detect_harden_bp_fw();
++      if (!need_wa)
++              return false;
++
++      /* forced off */
++      if (__nospectre_v2) {
++              pr_info_once("spectrev2 mitigation disabled by command line option\n");
++              return false;
++      }
++
++      if (need_wa < 0)
++              pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
++
++      return (need_wa > 0);
++}
+ #endif
+ const struct arm64_cpu_capabilities arm64_errata[] = {
+@@ -639,8 +643,8 @@ const struct arm64_cpu_capabilities arm6
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+-              .cpu_enable = enable_smccc_arch_workaround_1,
++              .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++              .matches = check_branch_predictor,
+       },
+ #endif
+       {
diff --git a/queue-4.14/arm64-always-enable-spectre-v2-vulnerability-detection.patch b/queue-4.14/arm64-always-enable-spectre-v2-vulnerability-detection.patch
new file mode 100644 (file)
index 0000000..aac8a5a
--- /dev/null
@@ -0,0 +1,93 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:27 +0200
+Subject: arm64: Always enable spectre-v2 vulnerability detection
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-43-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit 8c1e3d2bb44cbb998cb28ff9a18f105fee7f1eb3 ]
+
+Ensure we are always able to detect whether or not the CPU is affected
+by Spectre-v2, so that we can later advertise this to userspace.
+
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -76,7 +76,6 @@ cpu_enable_trap_ctr_access(const struct
+       config_sctlr_el1(SCTLR_EL1_UCT, 0);
+ }
+-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ #include <asm/mmu_context.h>
+ #include <asm/cacheflush.h>
+@@ -217,11 +216,11 @@ static int detect_harden_bp_fw(void)
+           ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
+               cb = qcom_link_stack_sanitization;
+-      install_bp_hardening_cb(cb, smccc_start, smccc_end);
++      if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
++              install_bp_hardening_cb(cb, smccc_start, smccc_end);
+       return 1;
+ }
+-#endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+@@ -457,7 +456,6 @@ out_printmsg:
+       .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
+       CAP_MIDR_RANGE_LIST(midr_list)
+-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ /*
+  * List of CPUs that do not need any Spectre-v2 mitigation at all.
+  */
+@@ -489,6 +487,12 @@ check_branch_predictor(const struct arm6
+       if (!need_wa)
+               return false;
++      if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
++              pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
++              __hardenbp_enab = false;
++              return false;
++      }
++
+       /* forced off */
+       if (__nospectre_v2) {
+               pr_info_once("spectrev2 mitigation disabled by command line option\n");
+@@ -500,7 +504,6 @@ check_branch_predictor(const struct arm6
+       return (need_wa > 0);
+ }
+-#endif
+ const struct arm64_cpu_capabilities arm64_errata[] = {
+ #if   defined(CONFIG_ARM64_ERRATUM_826319) || \
+@@ -640,13 +643,11 @@ const struct arm64_cpu_capabilities arm6
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+       },
+ #endif
+-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = check_branch_predictor,
+       },
+-#endif
+       {
+               .desc = "Speculative Store Bypass Disable",
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
diff --git a/queue-4.14/arm64-always-enable-ssb-vulnerability-detection.patch b/queue-4.14/arm64-always-enable-ssb-vulnerability-detection.patch
new file mode 100644 (file)
index 0000000..691dd26
--- /dev/null
@@ -0,0 +1,87 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:24 +0200
+Subject: arm64: Always enable ssb vulnerability detection
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-40-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit d42281b6e49510f078ace15a8ea10f71e6262581 ]
+
+Ensure we are always able to detect whether or not the CPU is affected
+by SSB, so that we can later advertise this to userspace.
+
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+[will: Use IS_ENABLED instead of #ifdef]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    4 ----
+ arch/arm64/kernel/cpu_errata.c      |    9 +++++----
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -493,11 +493,7 @@ static inline int arm64_get_ssbd_state(v
+ #endif
+ }
+-#ifdef CONFIG_ARM64_SSBD
+ void arm64_set_ssbd_mitigation(bool state);
+-#else
+-static inline void arm64_set_ssbd_mitigation(bool state) {}
+-#endif
+ #endif /* __ASSEMBLY__ */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -231,7 +231,6 @@ enable_smccc_arch_workaround_1(const str
+ }
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+-#ifdef CONFIG_ARM64_SSBD
+ DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+ int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+@@ -304,6 +303,11 @@ void __init arm64_enable_wa2_handling(st
+ void arm64_set_ssbd_mitigation(bool state)
+ {
++      if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
++              pr_info_once("SSBD disabled by kernel configuration\n");
++              return;
++      }
++
+       if (this_cpu_has_cap(ARM64_SSBS)) {
+               if (state)
+                       asm volatile(SET_PSTATE_SSBS(0));
+@@ -423,7 +427,6 @@ out_printmsg:
+       return required;
+ }
+-#endif        /* CONFIG_ARM64_SSBD */
+ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)     \
+       .matches = is_affected_midr_range,                      \
+@@ -627,14 +630,12 @@ const struct arm64_cpu_capabilities arm6
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+ #endif
+-#ifdef CONFIG_ARM64_SSBD
+       {
+               .desc = "Speculative Store Bypass Disable",
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .capability = ARM64_SSBD,
+               .matches = has_ssbd_mitigation,
+       },
+-#endif
+       {
+       }
+ };
diff --git a/queue-4.14/arm64-capabilities-add-flags-to-handle-the-conflicts-on-late-cpu.patch b/queue-4.14/arm64-capabilities-add-flags-to-handle-the-conflicts-on-late-cpu.patch
new file mode 100644 (file)
index 0000000..10dabea
--- /dev/null
@@ -0,0 +1,324 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:59 +0200
+Subject: arm64: capabilities: Add flags to handle the conflicts on late CPU
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-15-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 5b4747c5dce7a873e1e7fe1608835825f714267a ]
+
+When a CPU is brought up, it is checked against the caps that are
+known to be enabled on the system (via verify_local_cpu_capabilities()).
+Based on the state of the capability on the CPU vs. that of System we
+could have the following combinations of conflict.
+
+       x-----------------------------x
+       | Type  | System   | Late CPU |
+       |-----------------------------|
+       |  a    |   y      |    n     |
+       |-----------------------------|
+       |  b    |   n      |    y     |
+       x-----------------------------x
+
+Case (a) is not permitted for caps which are system features, which the
+system expects all the CPUs to have (e.g. VHE), while (a) is ignored for
+all errata work arounds. However, there could be exceptions to the plain
+filtering approach; e.g., KPTI is an optional feature for a late CPU as
+long as the system already enables it.
+
+Case (b) is not permitted for errata work arounds that cannot be activated
+after the kernel has finished booting. And we ignore (b) for features. Here,
+yet again, KPTI is an exception, where if a late CPU needs KPTI we are too
+late to enable it (because we change the allocation of ASIDs etc.).
+
+Add two different flags to indicate how the conflict should be handled.
+
+ ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - CPUs may have the capability
+ ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - CPUs may not have the capability.
+
+Now that we have the flags to describe the behavior of the errata and
+the features, as we treat them, define types for ERRATUM and FEATURE.
+
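+To make the rule concrete, here is a small user-space model
+(illustrative only; the bit values mirror the hunk below) of how the
+two flags resolve the (System, Late CPU) rows of the table above:
+
+        #include <stdio.h>
+        #include <stdbool.h>
+
+        #define OPTIONAL_FOR_LATE_CPU  (1u << 5)        /* case (a) allowed */
+        #define PERMITTED_FOR_LATE_CPU (1u << 4)        /* case (b) allowed */
+
+        /* May a late CPU with state 'cpu_has' join a system with 'sys_has'? */
+        static bool late_cpu_ok(unsigned int type, bool sys_has, bool cpu_has)
+        {
+                if (sys_has && !cpu_has)                /* case (a) */
+                        return type & OPTIONAL_FOR_LATE_CPU;
+                if (!sys_has && cpu_has)                /* case (b) */
+                        return type & PERMITTED_FOR_LATE_CPU;
+                return true;                            /* states agree */
+        }
+
+        int main(void)
+        {
+                unsigned int erratum = OPTIONAL_FOR_LATE_CPU;  /* LOCAL_CPU_ERRATUM */
+                unsigned int feature = PERMITTED_FOR_LATE_CPU; /* SYSTEM_FEATURE    */
+
+                /* case (b) is rejected for errata, case (a) for features */
+                printf("erratum, case (b): %s\n",
+                       late_cpu_ok(erratum, false, true) ? "ok" : "reject");
+                printf("feature, case (a): %s\n",
+                       late_cpu_ok(feature, true, false) ? "ok" : "reject");
+                return 0;
+        }
+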
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |   68 ++++++++++++++++++++++++++++++++++++
+ arch/arm64/kernel/cpu_errata.c      |   12 +++---
+ arch/arm64/kernel/cpufeature.c      |   26 ++++++-------
+ 3 files changed, 87 insertions(+), 19 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -149,6 +149,7 @@ extern struct arm64_ftr_reg arm64_ftr_re
+  *    an action, based on the severity (e.g, a CPU could be prevented from
+  *    booting or cause a kernel panic). The CPU is allowed to "affect" the
+  *    state of the capability, if it has not been finalised already.
++ *    See section 5 for more details on conflicts.
+  *
+  * 4) Action: As mentioned in (2), the kernel can take an action for each
+  *    detected capability, on all CPUs on the system. Appropriate actions
+@@ -166,6 +167,34 @@ extern struct arm64_ftr_reg arm64_ftr_re
+  *
+  *      check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
+  *
++ * 5) Conflicts: Based on the state of the capability on a late CPU vs.
++ *    the system state, we could have the following combinations :
++ *
++ *            x-----------------------------x
++ *            | Type  | System   | Late CPU |
++ *            |-----------------------------|
++ *            |  a    |   y      |    n     |
++ *            |-----------------------------|
++ *            |  b    |   n      |    y     |
++ *            x-----------------------------x
++ *
++ *     Two separate flag bits are defined to indicate whether each kind of
++ *     conflict can be allowed:
++ *            ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed
++ *            ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed
++ *
++ *     Case (a) is not permitted for a capability that the system requires
++ *     all CPUs to have in order for the capability to be enabled. This is
++ *     typical for capabilities that represent enhanced functionality.
++ *
++ *     Case (b) is not permitted for a capability that must be enabled
++ *     during boot if any CPU in the system requires it in order to run
++ *     safely. This is typical for erratum work arounds that cannot be
++ *     enabled after the corresponding capability is finalised.
++ *
++ *     In some non-typical cases either both (a) and (b), or neither,
++ *     should be permitted. This can be described by including neither
++ *     or both flags in the capability's type field.
+  */
+@@ -179,6 +208,33 @@ extern struct arm64_ftr_reg arm64_ftr_re
+ #define SCOPE_SYSTEM                          ARM64_CPUCAP_SCOPE_SYSTEM
+ #define SCOPE_LOCAL_CPU                               ARM64_CPUCAP_SCOPE_LOCAL_CPU
++/*
++ * Is it permitted for a late CPU to have this capability when the
++ * system hasn't already enabled it?
++ */
++#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU   ((u16)BIT(4))
++/* Is it safe for a late CPU to miss this capability when the system has it */
++#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU    ((u16)BIT(5))
++
++/*
++ * CPU errata workarounds that need to be enabled at boot time if one or
++ * more CPUs in the system requires it. When one of these capabilities
++ * has been enabled, it is safe to allow any CPU to boot that doesn't
++ * require the workaround. However, it is not safe if a "late" CPU
++ * requires a workaround and the system hasn't enabled it already.
++ */
++#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM                \
++      (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
++/*
++ * CPU feature detected at boot time based on system-wide value of a
++ * feature. It is safe for a late CPU to have this feature even though
++ * the system hasn't enabled it, although the feature will not be used
++ * by Linux in this case. If the system has enabled this feature already,
++ * then every late CPU must have it.
++ */
++#define ARM64_CPUCAP_SYSTEM_FEATURE   \
++      (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
++
+ struct arm64_cpu_capabilities {
+       const char *desc;
+       u16 capability;
+@@ -212,6 +268,18 @@ static inline int cpucap_default_scope(c
+       return cap->type & ARM64_CPUCAP_SCOPE_MASK;
+ }
++static inline bool
++cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
++{
++      return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
++}
++
++static inline bool
++cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
++{
++      return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
++}
++
+ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+ extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+ extern struct static_key_false arm64_const_caps_ready;
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -406,14 +406,14 @@ static bool has_ssbd_mitigation(const st
+ #endif        /* CONFIG_ARM64_SSBD */
+ #define MIDR_RANGE(model, min, max) \
+-      .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU, \
++      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
+       .matches = is_affected_midr_range, \
+       .midr_model = model, \
+       .midr_range_min = min, \
+       .midr_range_max = max
+ #define MIDR_ALL_VERSIONS(model) \
+-      .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU, \
++      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
+       .matches = is_affected_midr_range, \
+       .midr_model = model, \
+       .midr_range_min = 0, \
+@@ -517,14 +517,14 @@ const struct arm64_cpu_capabilities arm6
+               .desc = "Mismatched cache line size",
+               .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
+               .matches = has_mismatched_cache_type,
+-              .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU,
++              .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .cpu_enable = cpu_enable_trap_ctr_access,
+       },
+       {
+               .desc = "Mismatched cache type",
+               .capability = ARM64_MISMATCHED_CACHE_TYPE,
+               .matches = has_mismatched_cache_type,
+-              .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU,
++              .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .cpu_enable = cpu_enable_trap_ctr_access,
+       },
+ #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+@@ -538,7 +538,7 @@ const struct arm64_cpu_capabilities arm6
+       {
+               .desc = "Qualcomm Technologies Kryo erratum 1003",
+               .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+-              .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU,
++              .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .midr_model = MIDR_QCOM_KRYO,
+               .matches = is_kryo_midr,
+       },
+@@ -613,7 +613,7 @@ const struct arm64_cpu_capabilities arm6
+ #ifdef CONFIG_ARM64_SSBD
+       {
+               .desc = "Speculative Store Bypass Disable",
+-              .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU,
++              .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .capability = ARM64_SSBD,
+               .matches = has_ssbd_mitigation,
+       },
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -924,7 +924,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "GIC system register CPU interface",
+               .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_useable_gicv3_cpuif,
+               .sys_reg = SYS_ID_AA64PFR0_EL1,
+               .field_pos = ID_AA64PFR0_GIC_SHIFT,
+@@ -935,7 +935,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Privileged Access Never",
+               .capability = ARM64_HAS_PAN,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64MMFR1_EL1,
+               .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+@@ -948,7 +948,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "LSE atomic instructions",
+               .capability = ARM64_HAS_LSE_ATOMICS,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64ISAR0_EL1,
+               .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
+@@ -959,14 +959,14 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Software prefetching using PRFM",
+               .capability = ARM64_HAS_NO_HW_PREFETCH,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_no_hw_prefetch,
+       },
+ #ifdef CONFIG_ARM64_UAO
+       {
+               .desc = "User Access Override",
+               .capability = ARM64_HAS_UAO,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64MMFR2_EL1,
+               .field_pos = ID_AA64MMFR2_UAO_SHIFT,
+@@ -980,21 +980,21 @@ static const struct arm64_cpu_capabiliti
+ #ifdef CONFIG_ARM64_PAN
+       {
+               .capability = ARM64_ALT_PAN_NOT_UAO,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = cpufeature_pan_not_uao,
+       },
+ #endif /* CONFIG_ARM64_PAN */
+       {
+               .desc = "Virtualization Host Extensions",
+               .capability = ARM64_HAS_VIRT_HOST_EXTN,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = runs_at_el2,
+               .cpu_enable = cpu_copy_el2regs,
+       },
+       {
+               .desc = "32-bit EL0 Support",
+               .capability = ARM64_HAS_32BIT_EL0,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64PFR0_EL1,
+               .sign = FTR_UNSIGNED,
+@@ -1004,14 +1004,14 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Reduced HYP mapping offset",
+               .capability = ARM64_HYP_OFFSET_LOW,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = hyp_offset_low,
+       },
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       {
+               .desc = "Kernel page table isolation (KPTI)",
+               .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = unmap_kernel_at_el0,
+               .cpu_enable = kpti_install_ng_mappings,
+       },
+@@ -1019,7 +1019,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               /* FP/SIMD is not implemented */
+               .capability = ARM64_HAS_NO_FPSIMD,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .min_field_value = 0,
+               .matches = has_no_fpsimd,
+       },
+@@ -1027,7 +1027,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Data cache clean to Point of Persistence",
+               .capability = ARM64_HAS_DCPOP,
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64ISAR1_EL1,
+               .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+@@ -1040,7 +1040,7 @@ static const struct arm64_cpu_capabiliti
+ #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)    \
+       {                                                       \
+               .desc = #cap,                                   \
+-              .type = ARM64_CPUCAP_SCOPE_SYSTEM,              \
++              .type = ARM64_CPUCAP_SYSTEM_FEATURE,            \
+               .matches = has_cpuid_feature,                   \
+               .sys_reg = reg,                                 \
+               .field_pos = field,                             \
diff --git a/queue-4.14/arm64-capabilities-add-support-for-checks-based-on-a-list-of-midrs.patch b/queue-4.14/arm64-capabilities-add-support-for-checks-based-on-a-list-of-midrs.patch
new file mode 100644 (file)
index 0000000..f22af48
--- /dev/null
@@ -0,0 +1,216 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:13 +0200
+Subject: arm64: capabilities: Add support for checks based on a list of MIDRs
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-29-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit be5b299830c63ed76e0357473c4218c85fb388b3 ]
+
+Add helpers for detecting an erratum on a list of midr ranges
+of affected CPUs that share the same work around.
+
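+A minimal user-space model (types and mask simplified, not the kernel
+definitions) of the sentinel-terminated list walk the new helper
+performs:
+
+        #include <stdbool.h>
+
+        struct midr_range {
+                unsigned int model;             /* 0 terminates the list */
+                unsigned int rv_min, rv_max;    /* variant/revision bounds */
+        };
+
+        /* implementer | architecture | part number; illustrative value */
+        #define MODEL_MASK 0xff0ffff0u
+
+        static bool is_midr_in_range_list(unsigned int midr,
+                                          const struct midr_range *ranges)
+        {
+                /* an all-zero entry ({},) marks the end, as in the patch */
+                for (; ranges->model; ranges++)
+                        if ((midr & MODEL_MASK) == ranges->model)
+                                return true;    /* revision check elided */
+                return false;
+        }
+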
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+[ardb: add Cortex-A35 to kpti_safe_list[] as well]
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    1 
+ arch/arm64/include/asm/cputype.h    |    9 ++++
+ arch/arm64/kernel/cpu_errata.c      |   81 +++++++++++++++++++-----------------
+ arch/arm64/kernel/cpufeature.c      |   21 +++++----
+ 4 files changed, 66 insertions(+), 46 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -306,6 +306,7 @@ struct arm64_cpu_capabilities {
+                       struct midr_range midr_range;
+               };
++              const struct midr_range *midr_range_list;
+               struct {        /* Feature register checking */
+                       u32 sys_reg;
+                       u8 field_pos;
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -159,6 +159,15 @@ static inline bool is_midr_in_range(u32
+                                range->rv_min, range->rv_max);
+ }
++static inline bool
++is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
++{
++      while (ranges->model)
++              if (is_midr_in_range(midr, ranges++))
++                      return true;
++      return false;
++}
++
+ /*
+  * The CPU ID never changes at run time, so we might as well tell the
+  * compiler that it's constant.  Use this function to read the CPU ID
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -33,6 +33,14 @@ is_affected_midr_range(const struct arm6
+ }
+ static bool __maybe_unused
++is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
++                          int scope)
++{
++      WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++      return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
++}
++
++static bool __maybe_unused
+ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
+ {
+       u32 model;
+@@ -420,6 +428,10 @@ static bool has_ssbd_mitigation(const st
+       .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
+       CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
++#define CAP_MIDR_RANGE_LIST(list)                             \
++      .matches = is_affected_midr_range_list,                 \
++      .midr_range_list = list
++
+ /* Errata affecting a range of revisions of a given model variant */
+ #define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)    \
+       ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
+@@ -433,6 +445,35 @@ static bool has_ssbd_mitigation(const st
+       .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
+       CAP_MIDR_ALL_VERSIONS(model)
++/* Errata affecting a list of midr ranges, with the same work around */
++#define ERRATA_MIDR_RANGE_LIST(midr_list)                     \
++      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
++      CAP_MIDR_RANGE_LIST(midr_list)
++
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++
++/*
++ * List of CPUs where we need to issue a psci call to
++ * harden the branch predictor.
++ */
++static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
++      MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
++      MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
++      MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
++      {},
++};
++
++static const struct midr_range qcom_bp_harden_cpus[] = {
++      MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
++      MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
++      {},
++};
++
++#endif
++
+ const struct arm64_cpu_capabilities arm64_errata[] = {
+ #if   defined(CONFIG_ARM64_ERRATUM_826319) || \
+       defined(CONFIG_ARM64_ERRATUM_827319) || \
+@@ -574,51 +615,17 @@ const struct arm64_cpu_capabilities arm6
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+-              .cpu_enable = enable_smccc_arch_workaround_1,
+-      },
+-      {
+-              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+-              .cpu_enable = enable_smccc_arch_workaround_1,
+-      },
+-      {
+-              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
++              ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+-              .cpu_enable = enable_smccc_arch_workaround_1,
+-      },
+-      {
+-              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+-              .cpu_enable = qcom_enable_link_stack_sanitization,
+-      },
+-      {
+-              .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+-      },
+-      {
+-              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
++              ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
+               .cpu_enable = qcom_enable_link_stack_sanitization,
+       },
+       {
+               .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+-      },
+-      {
+-              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+-              .cpu_enable = enable_smccc_arch_workaround_1,
+-      },
+-      {
+-              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+-              .cpu_enable = enable_smccc_arch_workaround_1,
++              ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
+       },
+ #endif
+ #ifdef CONFIG_ARM64_SSBD
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -826,6 +826,17 @@ static int __kpti_forced; /* 0: not forc
+ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+                               int scope)
+ {
++      /* List of CPUs that are not vulnerable and don't need KPTI */
++      static const struct midr_range kpti_safe_list[] = {
++              MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
++              MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
++      };
+       char const *str = "command line option";
+       /*
+@@ -850,16 +861,8 @@ static bool unmap_kernel_at_el0(const st
+               return true;
+       /* Don't force KPTI for CPUs that are not vulnerable */
+-      switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
+-      case MIDR_CAVIUM_THUNDERX2:
+-      case MIDR_BRCM_VULCAN:
+-      case MIDR_CORTEX_A53:
+-      case MIDR_CORTEX_A55:
+-      case MIDR_CORTEX_A57:
+-      case MIDR_CORTEX_A72:
+-      case MIDR_CORTEX_A73:
++      if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+               return false;
+-      }
+       /* Defer to CPU feature registers */
+       return !has_cpuid_feature(entry, scope);
diff --git a/queue-4.14/arm64-capabilities-add-support-for-features-enabled-early.patch b/queue-4.14/arm64-capabilities-add-support-for-features-enabled-early.patch
new file mode 100644 (file)
index 0000000..79c2c53
--- /dev/null
@@ -0,0 +1,243 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:08 +0200
+Subject: arm64: capabilities: Add support for features enabled early
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Julien Thierry <julien.thierry@arm.com>, Will Deacon <will.deacon@arm.com>, Marc Zyngier <marc.zyngier@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-24-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit fd9d63da17daf09c0099e3d5e3f0c0f03d9b251b ]
+
+The kernel detects and uses some of the features based on the boot
+CPU and expects that all the following CPUs conform to it. For example,
+with VHE and the boot CPU running at EL2, the kernel decides to
+keep the kernel running at EL2. If another CPU is brought up without
+this capability, we use custom hooks (via check_early_cpu_features())
+to handle it. To handle such capabilities, add support for detecting
+and enabling capabilities based on the boot CPU.
+
+A bit is added to indicate if the capability should be detected
+early on the boot CPU. The infrastructure then ensures that such
+capabilities are probed and "enabled" early on the boot CPU
+and enabled on the subsequent CPUs.
+
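+The resulting boot-time flow, reduced to a stand-alone sketch (the
+stubs stand in for the real detection/enable routines; the scope
+values mirror the hunks below):
+
+        #define SCOPE_LOCAL_CPU (1u << 0)
+        #define SCOPE_SYSTEM    (1u << 1)
+        #define SCOPE_BOOT_CPU  (1u << 2)
+        #define SCOPE_ALL       (SCOPE_LOCAL_CPU | SCOPE_SYSTEM | SCOPE_BOOT_CPU)
+
+        static void update_cpu_capabilities(unsigned int mask) { (void)mask; }
+        static void enable_cpu_capabilities(unsigned int mask) { (void)mask; }
+
+        static void setup_boot_cpu_capabilities(void)
+        {
+                /* detect everything the boot CPU can decide on its own ... */
+                update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
+                /* ... but enable only the boot-CPU-scoped caps right away */
+                enable_cpu_capabilities(SCOPE_BOOT_CPU);
+        }
+
+        static void setup_system_capabilities(void)
+        {
+                update_cpu_capabilities(SCOPE_SYSTEM);
+                /* boot-CPU caps were enabled earlier; skip them this time */
+                enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+        }
+
+        int main(void)
+        {
+                setup_boot_cpu_capabilities();  /* early, on the boot CPU */
+                setup_system_capabilities();    /* once all CPUs are up   */
+                return 0;
+        }
+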
+Cc: Julien Thierry <julien.thierry@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |   48 ++++++++++++++++++++++++------
+ arch/arm64/kernel/cpufeature.c      |   57 +++++++++++++++++++++++++++---------
+ 2 files changed, 83 insertions(+), 22 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -104,7 +104,7 @@ extern struct arm64_ftr_reg arm64_ftr_re
+  *    value of a field in CPU ID feature register or checking the cpu
+  *    model. The capability provides a call back ( @matches() ) to
+  *    perform the check. Scope defines how the checks should be performed.
+- *    There are two cases:
++ *    There are three cases:
+  *
+  *     a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
+  *        matches. This implies, we have to run the check on all the
+@@ -117,6 +117,11 @@ extern struct arm64_ftr_reg arm64_ftr_re
+  *        capability relies on a field in one of the CPU ID feature
+  *        registers, we use the sanitised value of the register from the
+  *        CPU feature infrastructure to make the decision.
++ *            Or
++ *     c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
++ *        feature. This category is for features that are "finalised"
++ *        (or used) by the kernel very early even before the SMP cpus
++ *        are brought up.
+  *
+  *    The process of detection is usually denoted by "update" capability
+  *    state in the code.
+@@ -136,6 +141,11 @@ extern struct arm64_ftr_reg arm64_ftr_re
+  *    CPUs are treated "late CPUs" for capabilities determined by the boot
+  *    CPU.
+  *
++ *    At the moment there are two passes of finalising the capabilities.
++ *      a) Boot CPU scope capabilities - Finalised by primary boot CPU via
++ *         setup_boot_cpu_capabilities().
++ *      b) Everything except (a) - Run via setup_system_capabilities().
++ *
+  * 3) Verification: When a CPU is brought online (e.g, by user or by the
+  *    kernel), the kernel should make sure that it is safe to use the CPU,
+  *    by verifying that the CPU is compliant with the state of the
+@@ -144,12 +154,21 @@ extern struct arm64_ftr_reg arm64_ftr_re
+  *    secondary_start_kernel()-> check_local_cpu_capabilities()
+  *
+  *    As explained in (2) above, capabilities could be finalised at
+- *    different points in the execution. Each CPU is verified against the
+- *    "finalised" capabilities and if there is a conflict, the kernel takes
+- *    an action, based on the severity (e.g, a CPU could be prevented from
+- *    booting or cause a kernel panic). The CPU is allowed to "affect" the
+- *    state of the capability, if it has not been finalised already.
+- *    See section 5 for more details on conflicts.
++ *    different points in the execution. Each newly booted CPU is verified
++ *    against the capabilities that have been finalised by the time it
++ *    boots.
++ *
++ *    a) SCOPE_BOOT_CPU : All CPUs are verified against the capability
++ *    except for the primary boot CPU.
++ *
++ *    b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
++ *    user after the kernel boot are verified against the capability.
++ *
++ *    If there is a conflict, the kernel takes an action, based on the
++ *    severity (e.g, a CPU could be prevented from booting or cause a
++ *    kernel panic). The CPU is allowed to "affect" the state of the
++ *    capability, if it has not been finalised already. See section 5
++ *    for more details on conflicts.
+  *
+  * 4) Action: As mentioned in (2), the kernel can take an action for each
+  *    detected capability, on all CPUs on the system. Appropriate actions
+@@ -198,15 +217,26 @@ extern struct arm64_ftr_reg arm64_ftr_re
+  */
+-/* Decide how the capability is detected. On a local CPU vs System wide */
++/*
++ * Decide how the capability is detected.
++ * On any local CPU vs System wide vs the primary boot CPU
++ */
+ #define ARM64_CPUCAP_SCOPE_LOCAL_CPU          ((u16)BIT(0))
+ #define ARM64_CPUCAP_SCOPE_SYSTEM             ((u16)BIT(1))
++/*
++ * The capability is detected on the Boot CPU and is used by the kernel
++ * during early boot. i.e., the capability should be "detected" and
++ * "enabled" as early as possible on all booting CPUs.
++ */
++#define ARM64_CPUCAP_SCOPE_BOOT_CPU           ((u16)BIT(2))
+ #define ARM64_CPUCAP_SCOPE_MASK                       \
+       (ARM64_CPUCAP_SCOPE_SYSTEM      |       \
+-       ARM64_CPUCAP_SCOPE_LOCAL_CPU)
++       ARM64_CPUCAP_SCOPE_LOCAL_CPU   |       \
++       ARM64_CPUCAP_SCOPE_BOOT_CPU)
+ #define SCOPE_SYSTEM                          ARM64_CPUCAP_SCOPE_SYSTEM
+ #define SCOPE_LOCAL_CPU                               ARM64_CPUCAP_SCOPE_LOCAL_CPU
++#define SCOPE_BOOT_CPU                                ARM64_CPUCAP_SCOPE_BOOT_CPU
+ #define SCOPE_ALL                             ARM64_CPUCAP_SCOPE_MASK
+ /*
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -485,7 +485,7 @@ static void __init init_cpu_ftr_reg(u32
+ }
+ extern const struct arm64_cpu_capabilities arm64_errata[];
+-static void update_cpu_capabilities(u16 scope_mask);
++static void __init setup_boot_cpu_capabilities(void);
+ void __init init_cpu_features(struct cpuinfo_arm64 *info)
+ {
+@@ -525,10 +525,10 @@ void __init init_cpu_features(struct cpu
+       }
+       /*
+-       * Run the errata work around and local feature checks on the
+-       * boot CPU, once we have initialised the cpu feature infrastructure.
++       * Detect and enable early CPU capabilities based on the boot CPU,
++       * after we have initialised the CPU feature infrastructure.
+        */
+-      update_cpu_capabilities(SCOPE_LOCAL_CPU);
++      setup_boot_cpu_capabilities();
+ }
+ static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
+@@ -1219,13 +1219,24 @@ __enable_cpu_capabilities(const struct a
+               if (caps->cpu_enable) {
+                       /*
+-                       * Use stop_machine() as it schedules the work allowing
+-                       * us to modify PSTATE, instead of on_each_cpu() which
+-                       * uses an IPI, giving us a PSTATE that disappears when
+-                       * we return.
++                       * Capabilities with SCOPE_BOOT_CPU scope are finalised
++                       * before any secondary CPU boots. Thus, each secondary
++                       * will enable the capability as appropriate via
++                       * check_local_cpu_capabilities(). The only exception is
++                       * the boot CPU, for which the capability must be
++                       * enabled here. This approach avoids costly
++                       * stop_machine() calls for this case.
++                       *
++                       * Otherwise, use stop_machine() as it schedules the
++                       * work allowing us to modify PSTATE, instead of
++                       * on_each_cpu() which uses an IPI, giving us a PSTATE
++                       * that disappears when we return.
+                        */
+-                      stop_machine(__enable_cpu_capability, (void *)caps,
+-                                   cpu_online_mask);
++                      if (scope_mask & SCOPE_BOOT_CPU)
++                              caps->cpu_enable(caps);
++                      else
++                              stop_machine(__enable_cpu_capability,
++                                           (void *)caps, cpu_online_mask);
+               }
+       }
+ }
+@@ -1323,6 +1334,12 @@ static void check_early_cpu_features(voi
+ {
+       verify_cpu_run_el();
+       verify_cpu_asid_bits();
++      /*
++       * Early features are used by the kernel already. If there
++       * is a conflict, we cannot proceed further.
++       */
++      if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
++              cpu_panic_kernel();
+ }
+ static void
+@@ -1348,7 +1365,12 @@ verify_local_elf_hwcaps(const struct arm
+  */
+ static void verify_local_cpu_capabilities(void)
+ {
+-      if (!verify_local_cpu_caps(SCOPE_ALL))
++      /*
++       * The capabilities with SCOPE_BOOT_CPU are checked from
++       * check_early_cpu_features(), as they need to be verified
++       * on all secondary CPUs.
++       */
++      if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
+               cpu_die_early();
+       verify_local_elf_hwcaps(arm64_elf_hwcaps);
+@@ -1376,6 +1398,14 @@ void check_local_cpu_capabilities(void)
+               verify_local_cpu_capabilities();
+ }
++static void __init setup_boot_cpu_capabilities(void)
++{
++      /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
++      update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
++      /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
++      enable_cpu_capabilities(SCOPE_BOOT_CPU);
++}
++
+ DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+ EXPORT_SYMBOL(arm64_const_caps_ready);
+@@ -1397,10 +1427,11 @@ static void __init setup_system_capabili
+       /*
+        * We have finalised the system-wide safe feature
+        * registers, finalise the capabilities that depend
+-       * on it. Also enable all the available capabilities.
++       * on it. Also enable all the available capabilities
++       * that are not enabled already.
+        */
+       update_cpu_capabilities(SCOPE_SYSTEM);
+-      enable_cpu_capabilities(SCOPE_ALL);
++      enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+ }
+ void __init setup_cpu_features(void)
diff --git a/queue-4.14/arm64-capabilities-allow-features-based-on-local-cpu-scope.patch b/queue-4.14/arm64-capabilities-allow-features-based-on-local-cpu-scope.patch
new file mode 100644 (file)
index 0000000..b68a605
--- /dev/null
@@ -0,0 +1,86 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:04 +0200
+Subject: arm64: capabilities: Allow features based on local CPU scope
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-20-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit fbd890b9b8497bab04c1d338bd97579a7bc53fab ]
+
+So far we have treated the feature capabilities as system wide,
+and this wouldn't help with features that could be detected locally
+on one or more CPUs (e.g., KPTI, software prefetch). This patch
+splits the feature detection into two phases (see the sketch below):
+
+ 1) Local CPU features are checked on all boot time active CPUs.
+ 2) System wide features are checked only once after all CPUs are
+    active.
+
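+The two phases above, as a compact stand-alone model (the stubs stand
+in for the real update/verify machinery; sys_caps_initialised mirrors
+the flag used in the hunk below):
+
+        #include <stdbool.h>
+
+        static bool sys_caps_initialised;       /* set once all CPUs are up */
+
+        static void update_local_caps(void)  { /* phase 1: per-CPU checks  */ }
+        static void update_system_caps(void) { /* phase 2: once, post-SMP  */ }
+        static void verify_cpu(void)         { /* late CPUs verify instead */ }
+
+        /* called on each CPU as it comes online */
+        static void check_local_cpu_capabilities(void)
+        {
+                if (!sys_caps_initialised)
+                        update_local_caps();    /* boot-time CPUs detect   */
+                else
+                        verify_cpu();           /* hotplugged CPUs conform */
+        }
+
+        static void setup_cpu_features(void)    /* after SMP bring-up */
+        {
+                update_system_caps();
+                sys_caps_initialised = true;
+        }
+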
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -485,6 +485,7 @@ static void __init init_cpu_ftr_reg(u32
+ }
+ extern const struct arm64_cpu_capabilities arm64_errata[];
++static const struct arm64_cpu_capabilities arm64_features[];
+ static void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+                                   u16 scope_mask, const char *info);
+@@ -526,11 +527,12 @@ void __init init_cpu_features(struct cpu
+       }
+       /*
+-       * Run the errata work around checks on the boot CPU, once we have
+-       * initialised the cpu feature infrastructure.
++       * Run the errata work around and local feature checks on the
++       * boot CPU, once we have initialised the cpu feature infrastructure.
+        */
+       update_cpu_capabilities(arm64_errata, SCOPE_LOCAL_CPU,
+                               "enabling workaround for");
++      update_cpu_capabilities(arm64_features, SCOPE_LOCAL_CPU, "detected:");
+ }
+ static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
+@@ -1349,15 +1351,18 @@ void check_local_cpu_capabilities(void)
+       /*
+        * If we haven't finalised the system capabilities, this CPU gets
+-       * a chance to update the errata work arounds.
++       * a chance to update the errata work arounds and local features.
+        * Otherwise, this CPU should verify that it has all the system
+        * advertised capabilities.
+        */
+-      if (!sys_caps_initialised)
++      if (!sys_caps_initialised) {
+               update_cpu_capabilities(arm64_errata, SCOPE_LOCAL_CPU,
+                                       "enabling workaround for");
+-      else
++              update_cpu_capabilities(arm64_features, SCOPE_LOCAL_CPU,
++                                      "detected:");
++      } else {
+               verify_local_cpu_capabilities();
++      }
+ }
+ DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+@@ -1382,7 +1387,7 @@ void __init setup_cpu_features(void)
+       int cls;
+       /* Set the CPU feature capabilities */
+-      update_cpu_capabilities(arm64_features, SCOPE_ALL, "detected:");
++      update_cpu_capabilities(arm64_features, SCOPE_SYSTEM, "detected:");
+       update_cpu_capabilities(arm64_errata, SCOPE_SYSTEM,
+                               "enabling workaround for");
+       enable_cpu_capabilities(arm64_features, SCOPE_ALL);
diff --git a/queue-4.14/arm64-capabilities-change-scope-of-vhe-to-boot-cpu-feature.patch b/queue-4.14/arm64-capabilities-change-scope-of-vhe-to-boot-cpu-feature.patch
new file mode 100644 (file)
index 0000000..b2ff03d
--- /dev/null
@@ -0,0 +1,149 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:09 +0200
+Subject: arm64: capabilities: Change scope of VHE to Boot CPU feature
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Marc Zyngier <marc.zyngier@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-25-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 830dcc9f9a7cd26a812522a26efaacf7df6fc365 ]
+
+We expect all CPUs to be running at the same EL inside the kernel
+with or without VHE enabled and we have strict checks to ensure
+that any mismatch triggers a kernel panic. If VHE is enabled,
+we use the feature based on the boot CPU and all other CPUs
+should follow. This makes it a perfect candidate for a capability
+based on the boot CPU, which should be matched by all the CPUs
+(both when it is ON and OFF). This saves us some not-so-pretty
+hooks and special code, just for verifying the conflict.
+
+The patch also makes the VHE capability entry depend on
+CONFIG_ARM64_VHE.
+
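+The strict semantics fit in a few lines of C (illustrative; in the
+kernel the comparison is done by the capability verification code and
+a mismatch ends in cpu_panic_kernel()):
+
+        #include <stdbool.h>
+
+        /*
+         * Boot-CPU scope with neither OPTIONAL nor PERMITTED bit set:
+         * every secondary CPU must match the boot CPU's state exactly,
+         * whether the capability is ON or OFF.
+         */
+        static bool secondary_cpu_ok(bool boot_cpu_has, bool this_cpu_has)
+        {
+                return boot_cpu_has == this_cpu_has;
+        }
+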
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    6 +++++
+ arch/arm64/include/asm/virt.h       |    6 -----
+ arch/arm64/kernel/cpufeature.c      |    5 ++--
+ arch/arm64/kernel/smp.c             |   38 ------------------------------------
+ 4 files changed, 9 insertions(+), 46 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -283,6 +283,12 @@ extern struct arm64_ftr_reg arm64_ftr_re
+       (ARM64_CPUCAP_SCOPE_LOCAL_CPU           |       \
+        ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
++/*
++ * CPU feature used early in the boot based on the boot CPU. All secondary
++ * CPUs must match the state of the capability as detected by the boot CPU.
++ */
++#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
++
+ struct arm64_cpu_capabilities {
+       const char *desc;
+       u16 capability;
+--- a/arch/arm64/include/asm/virt.h
++++ b/arch/arm64/include/asm/virt.h
+@@ -102,12 +102,6 @@ static inline bool has_vhe(void)
+       return false;
+ }
+-#ifdef CONFIG_ARM64_VHE
+-extern void verify_cpu_run_el(void);
+-#else
+-static inline void verify_cpu_run_el(void) {}
+-#endif
+-
+ #endif /* __ASSEMBLY__ */
+ #endif /* ! __ASM__VIRT_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -982,13 +982,15 @@ static const struct arm64_cpu_capabiliti
+               .matches = cpufeature_pan_not_uao,
+       },
+ #endif /* CONFIG_ARM64_PAN */
++#ifdef CONFIG_ARM64_VHE
+       {
+               .desc = "Virtualization Host Extensions",
+               .capability = ARM64_HAS_VIRT_HOST_EXTN,
+-              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
++              .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+               .matches = runs_at_el2,
+               .cpu_enable = cpu_copy_el2regs,
+       },
++#endif        /* CONFIG_ARM64_VHE */
+       {
+               .desc = "32-bit EL0 Support",
+               .capability = ARM64_HAS_32BIT_EL0,
+@@ -1332,7 +1334,6 @@ static bool verify_local_cpu_caps(u16 sc
+  */
+ static void check_early_cpu_features(void)
+ {
+-      verify_cpu_run_el();
+       verify_cpu_asid_bits();
+       /*
+        * Early features are used by the kernel already. If there
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -83,43 +83,6 @@ enum ipi_msg_type {
+       IPI_WAKEUP
+ };
+-#ifdef CONFIG_ARM64_VHE
+-
+-/* Whether the boot CPU is running in HYP mode or not*/
+-static bool boot_cpu_hyp_mode;
+-
+-static inline void save_boot_cpu_run_el(void)
+-{
+-      boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
+-}
+-
+-static inline bool is_boot_cpu_in_hyp_mode(void)
+-{
+-      return boot_cpu_hyp_mode;
+-}
+-
+-/*
+- * Verify that a secondary CPU is running the kernel at the same
+- * EL as that of the boot CPU.
+- */
+-void verify_cpu_run_el(void)
+-{
+-      bool in_el2 = is_kernel_in_hyp_mode();
+-      bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode();
+-
+-      if (in_el2 ^ boot_cpu_el2) {
+-              pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
+-                                      smp_processor_id(),
+-                                      in_el2 ? 2 : 1,
+-                                      boot_cpu_el2 ? 2 : 1);
+-              cpu_panic_kernel();
+-      }
+-}
+-
+-#else
+-static inline void save_boot_cpu_run_el(void) {}
+-#endif
+-
+ #ifdef CONFIG_HOTPLUG_CPU
+ static int op_cpu_kill(unsigned int cpu);
+ #else
+@@ -448,7 +411,6 @@ void __init smp_prepare_boot_cpu(void)
+        */
+       jump_label_init();
+       cpuinfo_store_boot_cpu();
+-      save_boot_cpu_run_el();
+ }
+ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
diff --git a/queue-4.14/arm64-capabilities-clean-up-midr-range-helpers.patch b/queue-4.14/arm64-capabilities-clean-up-midr-range-helpers.patch
new file mode 100644 (file)
index 0000000..7e01126
--- /dev/null
@@ -0,0 +1,279 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:10 +0200
+Subject: arm64: capabilities: Clean up midr range helpers
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-26-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 5e7951ce19abf4113645ae789c033917356ee96f ]
+
+We are about to introduce generic MIDR range helpers. Clean
+up the existing helpers in erratum handling, preparing them
+to use the generic version.
+
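+The helpers below key everything off (variant, revision) pairs packed
+into the MIDR layout; a short model of that encoding (the shift and
+mask values follow the architectural MIDR_EL1 layout):
+
+        #define MIDR_REVISION_MASK      0xf
+        #define MIDR_VARIANT_SHIFT      20
+        #define MIDR_VARIANT_MASK       (0xf << MIDR_VARIANT_SHIFT)
+        #define MIDR_CPU_VAR_REV(var, rev) \
+                (((var) << MIDR_VARIANT_SHIFT) | (rev))
+
+        /*
+         * e.g. "r1p2" is variant 1, revision 2:
+         * MIDR_CPU_VAR_REV(1, 2) == 0x00100002
+         */
+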
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |  109 +++++++++++++++++++++++------------------
+ 1 file changed, 62 insertions(+), 47 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -405,20 +405,38 @@ static bool has_ssbd_mitigation(const st
+ }
+ #endif        /* CONFIG_ARM64_SSBD */
+-#define MIDR_RANGE(model, min, max) \
+-      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
+-      .matches = is_affected_midr_range, \
+-      .midr_model = model, \
+-      .midr_range_min = min, \
+-      .midr_range_max = max
+-
+-#define MIDR_ALL_VERSIONS(model) \
+-      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
+-      .matches = is_affected_midr_range, \
+-      .midr_model = model, \
+-      .midr_range_min = 0, \
++#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)     \
++      .matches = is_affected_midr_range,                      \
++      .midr_model = model,                                    \
++      .midr_range_min = MIDR_CPU_VAR_REV(v_min, r_min),       \
++      .midr_range_max = MIDR_CPU_VAR_REV(v_max, r_max)
++
++#define CAP_MIDR_ALL_VERSIONS(model)                                  \
++      .matches = is_affected_midr_range,                              \
++      .midr_model = model,                                            \
++      .midr_range_min = MIDR_CPU_VAR_REV(0, 0),                       \
+       .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
++#define MIDR_FIXED(rev, revidr_mask) \
++      .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
++
++#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)          \
++      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
++      CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
++
++/* Errata affecting a range of revisions of a given model variant */
++#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)    \
++      ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
++
++/* Errata affecting a single variant/revision of a model */
++#define ERRATA_MIDR_REV(model, var, rev)      \
++      ERRATA_MIDR_RANGE(model, var, rev, var, rev)
++
++/* Errata affecting all variants/revisions of a given model */
++#define ERRATA_MIDR_ALL_VERSIONS(model)                               \
++      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
++      CAP_MIDR_ALL_VERSIONS(model)
++
+ const struct arm64_cpu_capabilities arm64_errata[] = {
+ #if   defined(CONFIG_ARM64_ERRATUM_826319) || \
+       defined(CONFIG_ARM64_ERRATUM_827319) || \
+@@ -427,7 +445,7 @@ const struct arm64_cpu_capabilities arm6
+       /* Cortex-A53 r0p[012] */
+               .desc = "ARM errata 826319, 827319, 824069",
+               .capability = ARM64_WORKAROUND_CLEAN_CACHE,
+-              MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
++              ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
+               .cpu_enable = cpu_enable_cache_maint_trap,
+       },
+ #endif
+@@ -436,7 +454,7 @@ const struct arm64_cpu_capabilities arm6
+       /* Cortex-A53 r0p[01] */
+               .desc = "ARM errata 819472",
+               .capability = ARM64_WORKAROUND_CLEAN_CACHE,
+-              MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
++              ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
+               .cpu_enable = cpu_enable_cache_maint_trap,
+       },
+ #endif
+@@ -445,9 +463,9 @@ const struct arm64_cpu_capabilities arm6
+       /* Cortex-A57 r0p0 - r1p2 */
+               .desc = "ARM erratum 832075",
+               .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
+-              MIDR_RANGE(MIDR_CORTEX_A57,
+-                         MIDR_CPU_VAR_REV(0, 0),
+-                         MIDR_CPU_VAR_REV(1, 2)),
++              ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
++                                0, 0,
++                                1, 2),
+       },
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_834220
+@@ -455,9 +473,9 @@ const struct arm64_cpu_capabilities arm6
+       /* Cortex-A57 r0p0 - r1p2 */
+               .desc = "ARM erratum 834220",
+               .capability = ARM64_WORKAROUND_834220,
+-              MIDR_RANGE(MIDR_CORTEX_A57,
+-                         MIDR_CPU_VAR_REV(0, 0),
+-                         MIDR_CPU_VAR_REV(1, 2)),
++              ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
++                                0, 0,
++                                1, 2),
+       },
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_845719
+@@ -465,7 +483,7 @@ const struct arm64_cpu_capabilities arm6
+       /* Cortex-A53 r0p[01234] */
+               .desc = "ARM erratum 845719",
+               .capability = ARM64_WORKAROUND_845719,
+-              MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
++              ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+       },
+ #endif
+ #ifdef CONFIG_CAVIUM_ERRATUM_23154
+@@ -473,7 +491,7 @@ const struct arm64_cpu_capabilities arm6
+       /* Cavium ThunderX, pass 1.x */
+               .desc = "Cavium erratum 23154",
+               .capability = ARM64_WORKAROUND_CAVIUM_23154,
+-              MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
++              ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
+       },
+ #endif
+ #ifdef CONFIG_CAVIUM_ERRATUM_27456
+@@ -481,15 +499,15 @@ const struct arm64_cpu_capabilities arm6
+       /* Cavium ThunderX, T88 pass 1.x - 2.1 */
+               .desc = "Cavium erratum 27456",
+               .capability = ARM64_WORKAROUND_CAVIUM_27456,
+-              MIDR_RANGE(MIDR_THUNDERX,
+-                         MIDR_CPU_VAR_REV(0, 0),
+-                         MIDR_CPU_VAR_REV(1, 1)),
++              ERRATA_MIDR_RANGE(MIDR_THUNDERX,
++                                0, 0,
++                                1, 1),
+       },
+       {
+       /* Cavium ThunderX, T81 pass 1.0 */
+               .desc = "Cavium erratum 27456",
+               .capability = ARM64_WORKAROUND_CAVIUM_27456,
+-              MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
++              ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
+       },
+ #endif
+ #ifdef CONFIG_CAVIUM_ERRATUM_30115
+@@ -497,20 +515,21 @@ const struct arm64_cpu_capabilities arm6
+       /* Cavium ThunderX, T88 pass 1.x - 2.2 */
+               .desc = "Cavium erratum 30115",
+               .capability = ARM64_WORKAROUND_CAVIUM_30115,
+-              MIDR_RANGE(MIDR_THUNDERX, 0x00,
+-                         (1 << MIDR_VARIANT_SHIFT) | 2),
++              ERRATA_MIDR_RANGE(MIDR_THUNDERX,
++                                    0, 0,
++                                    1, 2),
+       },
+       {
+       /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
+               .desc = "Cavium erratum 30115",
+               .capability = ARM64_WORKAROUND_CAVIUM_30115,
+-              MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
++              ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
+       },
+       {
+       /* Cavium ThunderX, T83 pass 1.0 */
+               .desc = "Cavium erratum 30115",
+               .capability = ARM64_WORKAROUND_CAVIUM_30115,
+-              MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
++              ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
+       },
+ #endif
+       {
+@@ -531,9 +550,7 @@ const struct arm64_cpu_capabilities arm6
+       {
+               .desc = "Qualcomm Technologies Falkor erratum 1003",
+               .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+-              MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
+-                         MIDR_CPU_VAR_REV(0, 0),
+-                         MIDR_CPU_VAR_REV(0, 0)),
++              ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+       },
+       {
+               .desc = "Qualcomm Technologies Kryo erratum 1003",
+@@ -547,9 +564,7 @@ const struct arm64_cpu_capabilities arm6
+       {
+               .desc = "Qualcomm Technologies Falkor erratum 1009",
+               .capability = ARM64_WORKAROUND_REPEAT_TLBI,
+-              MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
+-                         MIDR_CPU_VAR_REV(0, 0),
+-                         MIDR_CPU_VAR_REV(0, 0)),
++              ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+       },
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_858921
+@@ -557,56 +572,56 @@ const struct arm64_cpu_capabilities arm6
+       /* Cortex-A73 all versions */
+               .desc = "ARM erratum 858921",
+               .capability = ARM64_WORKAROUND_858921,
+-              MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+       },
+ #endif
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+               .cpu_enable = qcom_enable_link_stack_sanitization,
+       },
+       {
+               .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+-              MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+               .cpu_enable = qcom_enable_link_stack_sanitization,
+       },
+       {
+               .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+-              MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
++              ERRATA_MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+ #endif
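
The hunks above convert open-coded MIDR_CPU_VAR_REV() ranges to the
ERRATA_MIDR_RANGE()/ERRATA_MIDR_REV_RANGE() helpers without changing
which CPUs match. As a rough illustration of what such a check
computes, here is a minimal standalone C sketch: it packs a
(variant, revision) pair the way MIDR_CPU_VAR_REV() does and tests
whether a CPU's MIDR falls inside an affected window. The MIDR field
layout follows the Arm architecture; the helper names and example
values are illustrative, not the kernel's.

/* midr_range.c: toy model of a MIDR variant/revision range check */
#include <stdint.h>
#include <stdio.h>

#define MIDR_REVISION_MASK  0xf
#define MIDR_VARIANT_SHIFT  20
#define MIDR_VARIANT_MASK   (0xf << MIDR_VARIANT_SHIFT)

/* Pack (variant, revision) so ranges compare numerically. */
static uint32_t var_rev(uint32_t var, uint32_t rev)
{
        return (var << MIDR_VARIANT_SHIFT) | rev;
}

/* Does this MIDR name the given model, within [min, max]? */
static int midr_in_range(uint32_t midr, uint32_t model,
                         uint32_t min, uint32_t max)
{
        uint32_t rev = midr & (MIDR_VARIANT_MASK | MIDR_REVISION_MASK);

        if ((midr & ~(MIDR_VARIANT_MASK | MIDR_REVISION_MASK)) != model)
                return 0;
        return rev >= min && rev <= max;
}

int main(void)
{
        uint32_t model = 0x410fd070;            /* Cortex-A57, var/rev zeroed */
        uint32_t midr  = model | var_rev(1, 1); /* an r1p1 part */

        /* Cortex-A57 r0p0 - r1p2, as in the entries above. */
        printf("affected: %d\n",
               midr_in_range(midr, model, var_rev(0, 0), var_rev(1, 2)));
        return 0;
}
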
diff --git a/queue-4.14/arm64-capabilities-filter-the-entries-based-on-a-given-mask.patch b/queue-4.14/arm64-capabilities-filter-the-entries-based-on-a-given-mask.patch
new file mode 100644 (file)
index 0000000..b411c07
--- /dev/null
@@ -0,0 +1,138 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:01 +0200
+Subject: arm64: capabilities: Filter the entries based on a given mask
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-17-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit cce360b54ce6ca1bcf4b0a870ec076d83606775e ]
+
+While processing the list of capabilities, it is useful to filter
+out entries based on a given mask of capability scopes, to allow
+finer control. This can be used later for handling LOCAL vs
+SYSTEM-wide capabilities and more. All capabilities should have
+their scope set to either LOCAL_CPU or SYSTEM. No functional/flow
+change.
+
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    1 +
+ arch/arm64/kernel/cpufeature.c      |   33 ++++++++++++++++++++++-----------
+ 2 files changed, 23 insertions(+), 11 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -207,6 +207,7 @@ extern struct arm64_ftr_reg arm64_ftr_re
+ #define SCOPE_SYSTEM                          ARM64_CPUCAP_SCOPE_SYSTEM
+ #define SCOPE_LOCAL_CPU                               ARM64_CPUCAP_SCOPE_LOCAL_CPU
++#define SCOPE_ALL                             ARM64_CPUCAP_SCOPE_MASK
+ /*
+  * Is it permitted for a late CPU to have this capability when system
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1164,10 +1164,12 @@ static bool __this_cpu_has_cap(const str
+ }
+ static void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+-                                  const char *info)
++                                  u16 scope_mask, const char *info)
+ {
++      scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
+       for (; caps->matches; caps++) {
+-              if (!caps->matches(caps, cpucap_default_scope(caps)))
++              if (!(caps->type & scope_mask) ||
++                  !caps->matches(caps, cpucap_default_scope(caps)))
+                       continue;
+               if (!cpus_have_cap(caps->capability) && caps->desc)
+@@ -1189,12 +1191,14 @@ static int __enable_cpu_capability(void
+  * CPUs
+  */
+ static void __init
+-enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
++enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
++                      u16 scope_mask)
+ {
++      scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
+       for (; caps->matches; caps++) {
+               unsigned int num = caps->capability;
+-              if (!cpus_have_cap(num))
++              if (!(caps->type & scope_mask) || !cpus_have_cap(num))
+                       continue;
+               /* Ensure cpus_have_const_cap(num) works */
+@@ -1236,12 +1240,18 @@ static inline void set_sys_caps_initiali
+  * Returns "false" on conflicts.
+  */
+ static bool
+-__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps_list)
++__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps_list,
++                      u16 scope_mask)
+ {
+       bool cpu_has_cap, system_has_cap;
+       const struct arm64_cpu_capabilities *caps;
++      scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
++
+       for (caps = caps_list; caps->matches; caps++) {
++              if (!(caps->type & scope_mask))
++                      continue;
++
+               cpu_has_cap = __this_cpu_has_cap(caps_list, caps->capability);
+               system_has_cap = cpus_have_cap(caps->capability);
+@@ -1304,7 +1314,7 @@ verify_local_elf_hwcaps(const struct arm
+ static void verify_local_cpu_features(void)
+ {
+-      if (!__verify_local_cpu_caps(arm64_features))
++      if (!__verify_local_cpu_caps(arm64_features, SCOPE_ALL))
+               cpu_die_early();
+ }
+@@ -1315,18 +1325,19 @@ static void verify_local_cpu_features(vo
+  */
+ static void verify_local_cpu_errata_workarounds(void)
+ {
+-      if (!__verify_local_cpu_caps(arm64_errata))
++      if (!__verify_local_cpu_caps(arm64_errata, SCOPE_ALL))
+               cpu_die_early();
+ }
+ static void update_cpu_errata_workarounds(void)
+ {
+-      update_cpu_capabilities(arm64_errata, "enabling workaround for");
++      update_cpu_capabilities(arm64_errata, SCOPE_ALL,
++                              "enabling workaround for");
+ }
+ static void __init enable_errata_workarounds(void)
+ {
+-      enable_cpu_capabilities(arm64_errata);
++      enable_cpu_capabilities(arm64_errata, SCOPE_ALL);
+ }
+ /*
+@@ -1368,8 +1379,8 @@ void check_local_cpu_capabilities(void)
+ static void __init setup_feature_capabilities(void)
+ {
+-      update_cpu_capabilities(arm64_features, "detected feature:");
+-      enable_cpu_capabilities(arm64_features);
++      update_cpu_capabilities(arm64_features, SCOPE_ALL, "detected:");
++      enable_cpu_capabilities(arm64_features, SCOPE_ALL);
+ }
+ DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
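
To make the new filtering concrete, a minimal userspace sketch of the
loop this patch introduces is shown below: each capability carries its
scope in the type bits, and the walker skips entries outside the
caller's mask. The structure is reduced to the essentials, and the
names and bit values are simplified stand-ins for the kernel's.

/* scope_filter.c: toy model of update_cpu_capabilities(caps, mask) */
#include <stdint.h>
#include <stdio.h>

#define SCOPE_LOCAL_CPU (1u << 0)
#define SCOPE_SYSTEM    (1u << 1)
#define SCOPE_MASK      (SCOPE_LOCAL_CPU | SCOPE_SYSTEM)

struct cap {
        const char *desc;
        uint16_t type;                  /* scope bits live in here */
        int (*matches)(void);
};

static int always(void) { return 1; }

static void update_caps(const struct cap *caps, uint16_t scope_mask)
{
        scope_mask &= SCOPE_MASK;
        for (; caps->matches; caps++) {
                if (!(caps->type & scope_mask) || !caps->matches())
                        continue;       /* out of scope or not detected */
                printf("detected: %s\n", caps->desc);
        }
}

int main(void)
{
        const struct cap table[] = {
                { "local-cpu erratum",   SCOPE_LOCAL_CPU, always },
                { "system-wide feature", SCOPE_SYSTEM,    always },
                { 0 },                  /* sentinel: .matches == NULL */
        };

        update_caps(table, SCOPE_LOCAL_CPU);    /* erratum only */
        update_caps(table, SCOPE_MASK);         /* both entries */
        return 0;
}
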
diff --git a/queue-4.14/arm64-capabilities-group-handling-of-features-and-errata-workarounds.patch b/queue-4.14/arm64-capabilities-group-handling-of-features-and-errata-workarounds.patch
new file mode 100644 (file)
index 0000000..b4a871a
--- /dev/null
@@ -0,0 +1,176 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:05 +0200
+Subject: arm64: capabilities: Group handling of features and errata workarounds
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-21-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit ed478b3f9e4ac97fdbe07007fb2662415de8fe25 ]
+
+Now that the features and errata workarounds have the same
+rules and flow, group the handling of the tables.
+
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   73 +++++++++++++++++++++++------------------
+ 1 file changed, 42 insertions(+), 31 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -485,9 +485,7 @@ static void __init init_cpu_ftr_reg(u32
+ }
+ extern const struct arm64_cpu_capabilities arm64_errata[];
+-static const struct arm64_cpu_capabilities arm64_features[];
+-static void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+-                                  u16 scope_mask, const char *info);
++static void update_cpu_capabilities(u16 scope_mask);
+ void __init init_cpu_features(struct cpuinfo_arm64 *info)
+ {
+@@ -530,9 +528,7 @@ void __init init_cpu_features(struct cpu
+        * Run the errata work around and local feature checks on the
+        * boot CPU, once we have initialised the cpu feature infrastructure.
+        */
+-      update_cpu_capabilities(arm64_errata, SCOPE_LOCAL_CPU,
+-                              "enabling workaround for");
+-      update_cpu_capabilities(arm64_features, SCOPE_LOCAL_CPU, "detected:");
++      update_cpu_capabilities(SCOPE_LOCAL_CPU);
+ }
+ static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
+@@ -1167,8 +1163,8 @@ static bool __this_cpu_has_cap(const str
+       return false;
+ }
+-static void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+-                                  u16 scope_mask, const char *info)
++static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
++                                    u16 scope_mask, const char *info)
+ {
+       scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
+       for (; caps->matches; caps++) {
+@@ -1182,6 +1178,13 @@ static void update_cpu_capabilities(cons
+       }
+ }
++static void update_cpu_capabilities(u16 scope_mask)
++{
++      __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
++      __update_cpu_capabilities(arm64_errata, scope_mask,
++                                "enabling workaround for");
++}
++
+ static int __enable_cpu_capability(void *arg)
+ {
+       const struct arm64_cpu_capabilities *cap = arg;
+@@ -1195,8 +1198,8 @@ static int __enable_cpu_capability(void
+  * CPUs
+  */
+ static void __init
+-enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+-                      u16 scope_mask)
++__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
++                        u16 scope_mask)
+ {
+       scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
+       for (; caps->matches; caps++) {
+@@ -1221,6 +1224,12 @@ enable_cpu_capabilities(const struct arm
+       }
+ }
++static void __init enable_cpu_capabilities(u16 scope_mask)
++{
++      __enable_cpu_capabilities(arm64_features, scope_mask);
++      __enable_cpu_capabilities(arm64_errata, scope_mask);
++}
++
+ /*
+  * Flag to indicate if we have computed the system wide
+  * capabilities based on the boot time active CPUs. This
+@@ -1294,6 +1303,12 @@ __verify_local_cpu_caps(const struct arm
+       return true;
+ }
++static bool verify_local_cpu_caps(u16 scope_mask)
++{
++      return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
++             __verify_local_cpu_caps(arm64_features, scope_mask);
++}
++
+ /*
+  * Check for CPU features that are used in early boot
+  * based on the Boot CPU value.
+@@ -1327,15 +1342,9 @@ verify_local_elf_hwcaps(const struct arm
+  */
+ static void verify_local_cpu_capabilities(void)
+ {
+-      /*
+-       * The CPU Errata work arounds are detected and applied at boot time
+-       * and the related information is freed soon after. If the new CPU
+-       * requires an errata not detected at boot, fail this CPU.
+-       */
+-      if (!__verify_local_cpu_caps(arm64_errata, SCOPE_ALL))
+-              cpu_die_early();
+-      if (!__verify_local_cpu_caps(arm64_features, SCOPE_ALL))
++      if (!verify_local_cpu_caps(SCOPE_ALL))
+               cpu_die_early();
++
+       verify_local_elf_hwcaps(arm64_elf_hwcaps);
+       if (system_supports_32bit_el0())
+               verify_local_elf_hwcaps(compat_elf_hwcaps);
+@@ -1355,14 +1364,10 @@ void check_local_cpu_capabilities(void)
+        * Otherwise, this CPU should verify that it has all the system
+        * advertised capabilities.
+        */
+-      if (!sys_caps_initialised) {
+-              update_cpu_capabilities(arm64_errata, SCOPE_LOCAL_CPU,
+-                                      "enabling workaround for");
+-              update_cpu_capabilities(arm64_features, SCOPE_LOCAL_CPU,
+-                                      "detected:");
+-      } else {
++      if (!sys_caps_initialised)
++              update_cpu_capabilities(SCOPE_LOCAL_CPU);
++      else
+               verify_local_cpu_capabilities();
+-      }
+ }
+ DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+@@ -1381,17 +1386,23 @@ bool this_cpu_has_cap(unsigned int cap)
+               __this_cpu_has_cap(arm64_errata, cap));
+ }
++static void __init setup_system_capabilities(void)
++{
++      /*
++       * We have finalised the system-wide safe feature
++       * registers, finalise the capabilities that depend
++       * on it. Also enable all the available capabilities.
++       */
++      update_cpu_capabilities(SCOPE_SYSTEM);
++      enable_cpu_capabilities(SCOPE_ALL);
++}
++
+ void __init setup_cpu_features(void)
+ {
+       u32 cwg;
+       int cls;
+-      /* Set the CPU feature capabilies */
+-      update_cpu_capabilities(arm64_features, SCOPE_SYSTEM, "detected:");
+-      update_cpu_capabilities(arm64_errata, SCOPE_SYSTEM,
+-                              "enabling workaround for");
+-      enable_cpu_capabilities(arm64_features, SCOPE_ALL);
+-      enable_cpu_capabilities(arm64_errata, SCOPE_ALL);
++      setup_system_capabilities();
+       mark_const_caps_ready();
+       setup_elf_hwcaps(arm64_elf_hwcaps);
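
The grouped verify_local_cpu_caps() above pairs with the conflict
rules defined earlier in this series. A condensed model of the
late-CPU decision, assuming the two late-CPU policy flags the series
describes, might look like the sketch below; the types and state are
stand-ins, not the kernel's.

/* late_cpu.c: toy model of a late-CPU capability conflict check */
#include <stdbool.h>
#include <stdio.h>

struct cap {
        const char *desc;
        bool permitted_for_late_cpu; /* may have it when system doesn't */
        bool optional_for_late_cpu;  /* may lack it when system has it */
};

static bool verify_one(const struct cap *c, bool cpu_has, bool sys_has)
{
        if (cpu_has && !sys_has && !c->permitted_for_late_cpu)
                return false; /* e.g. an erratum missed at boot time */
        if (!cpu_has && sys_has && !c->optional_for_late_cpu)
                return false; /* e.g. a feature the kernel relies on */
        return true;
}

int main(void)
{
        const struct cap strict = { "strict feature", false, false };

        /* A late CPU lacking a finalised, mandatory capability fails. */
        if (!verify_one(&strict, false, true))
                printf("conflict on %s: kernel would cpu_die_early()\n",
                       strict.desc);
        return 0;
}
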
diff --git a/queue-4.14/arm64-capabilities-introduce-weak-features-based-on-local-cpu.patch b/queue-4.14/arm64-capabilities-introduce-weak-features-based-on-local-cpu.patch
new file mode 100644 (file)
index 0000000..9409471
--- /dev/null
@@ -0,0 +1,58 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:06 +0200
+Subject: arm64: capabilities: Introduce weak features based on local CPU
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-22-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 5c137714dd8cae464dbd5f028c07af149e6d09fc ]
+
+Now that we have the flexibility of defining system features based
+on individual CPUs, introduce a CPU feature type that is detected
+with local CPU scope and for which conflicts on late CPUs are
+ignored. This is applicable to ARM64_HAS_NO_HW_PREFETCH, where it
+is fine for CPUs without hardware prefetch to turn up in the system
+later: we only suffer a performance penalty, nothing fatal.
+
+Cc: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    8 ++++++++
+ arch/arm64/kernel/cpufeature.c      |    2 +-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -235,6 +235,14 @@ extern struct arm64_ftr_reg arm64_ftr_re
+  */
+ #define ARM64_CPUCAP_SYSTEM_FEATURE   \
+       (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
++/*
++ * CPU feature detected at boot time based on feature of one or more CPUs.
++ * All possible conflicts for a late CPU are ignored.
++ */
++#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE           \
++      (ARM64_CPUCAP_SCOPE_LOCAL_CPU           |       \
++       ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU     |       \
++       ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
+ struct arm64_cpu_capabilities {
+       const char *desc;
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -959,7 +959,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Software prefetching using PRFM",
+               .capability = ARM64_HAS_NO_HW_PREFETCH,
+-              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
++              .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+               .matches = has_no_hw_prefetch,
+       },
+ #ifdef CONFIG_ARM64_UAO
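
A tiny sketch of how such a composite capability type is built from
individual scope and late-CPU policy bits; the bit positions below
are made up, and only the composition pattern mirrors the header
change above.

/* weak_cap.c: composing a "weak" local-CPU capability type */
#include <stdint.h>
#include <stdio.h>

#define SCOPE_LOCAL_CPU        (1u << 0)
#define OPTIONAL_FOR_LATE_CPU  (1u << 2)
#define PERMITTED_FOR_LATE_CPU (1u << 3)

/* Detected on any one CPU; late CPUs may freely have or lack it. */
#define WEAK_LOCAL_CPU_FEATURE \
        (SCOPE_LOCAL_CPU | OPTIONAL_FOR_LATE_CPU | PERMITTED_FOR_LATE_CPU)

int main(void)
{
        uint16_t type = WEAK_LOCAL_CPU_FEATURE;

        printf("local scope: %d, tolerant both ways: %d\n",
               !!(type & SCOPE_LOCAL_CPU),
               !!(type & OPTIONAL_FOR_LATE_CPU) &&
               !!(type & PERMITTED_FOR_LATE_CPU));
        return 0;
}
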
diff --git a/queue-4.14/arm64-capabilities-move-errata-processing-code.patch b/queue-4.14/arm64-capabilities-move-errata-processing-code.patch
new file mode 100644 (file)
index 0000000..e39e403
--- /dev/null
@@ -0,0 +1,164 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:57 +0200
+Subject: arm64: capabilities: Move errata processing code
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Marc Zyngier <marc.zyngier@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-13-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 1e89baed5d50d2b8d9fd420830902570270703f1 ]
+
+We have errata work-around processing code in cpu_errata.c,
+which calls back into helpers defined in cpufeature.c. Now
+that we are going to make the handling of capabilities
+generic, by adding the information to each capability,
+move the errata-work-around-specific processing code into
+cpufeature.c. No functional changes.
+
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    7 -----
+ arch/arm64/kernel/cpu_errata.c      |   33 ---------------------------
+ arch/arm64/kernel/cpufeature.c      |   43 +++++++++++++++++++++++++++++++++---
+ 3 files changed, 40 insertions(+), 43 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -230,15 +230,8 @@ static inline bool id_aa64pfr0_32bit_el0
+ }
+ void __init setup_cpu_features(void);
+-
+-void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+-                          const char *info);
+-void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
+ void check_local_cpu_capabilities(void);
+-void update_cpu_errata_workarounds(void);
+-void __init enable_errata_workarounds(void);
+-void verify_local_cpu_errata_workarounds(void);
+ u64 read_sanitised_ftr_reg(u32 id);
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -621,36 +621,3 @@ const struct arm64_cpu_capabilities arm6
+       {
+       }
+ };
+-
+-/*
+- * The CPU Errata work arounds are detected and applied at boot time
+- * and the related information is freed soon after. If the new CPU requires
+- * an errata not detected at boot, fail this CPU.
+- */
+-void verify_local_cpu_errata_workarounds(void)
+-{
+-      const struct arm64_cpu_capabilities *caps = arm64_errata;
+-
+-      for (; caps->matches; caps++) {
+-              if (cpus_have_cap(caps->capability)) {
+-                      if (caps->cpu_enable)
+-                              caps->cpu_enable(caps);
+-              } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
+-                      pr_crit("CPU%d: Requires work around for %s, not detected"
+-                                      " at boot time\n",
+-                              smp_processor_id(),
+-                              caps->desc ? : "an erratum");
+-                      cpu_die_early();
+-              }
+-      }
+-}
+-
+-void update_cpu_errata_workarounds(void)
+-{
+-      update_cpu_capabilities(arm64_errata, "enabling workaround for");
+-}
+-
+-void __init enable_errata_workarounds(void)
+-{
+-      enable_cpu_capabilities(arm64_errata);
+-}
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -484,6 +484,9 @@ static void __init init_cpu_ftr_reg(u32
+       reg->user_mask = user_mask;
+ }
++extern const struct arm64_cpu_capabilities arm64_errata[];
++static void update_cpu_errata_workarounds(void);
++
+ void __init init_cpu_features(struct cpuinfo_arm64 *info)
+ {
+       /* Before we start using the tables, make sure it is sorted */
+@@ -1160,8 +1163,8 @@ static bool __this_cpu_has_cap(const str
+       return false;
+ }
+-void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+-                          const char *info)
++static void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
++                                  const char *info)
+ {
+       for (; caps->matches; caps++) {
+               if (!caps->matches(caps, caps->def_scope))
+@@ -1185,7 +1188,8 @@ static int __enable_cpu_capability(void
+  * Run through the enabled capabilities and enable() it on all active
+  * CPUs
+  */
+-void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
++static void __init
++enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+ {
+       for (; caps->matches; caps++) {
+               unsigned int num = caps->capability;
+@@ -1268,6 +1272,39 @@ verify_local_cpu_features(const struct a
+ }
+ /*
++ * The CPU Errata work arounds are detected and applied at boot time
++ * and the related information is freed soon after. If the new CPU requires
++ * an errata not detected at boot, fail this CPU.
++ */
++static void verify_local_cpu_errata_workarounds(void)
++{
++      const struct arm64_cpu_capabilities *caps = arm64_errata;
++
++      for (; caps->matches; caps++) {
++              if (cpus_have_cap(caps->capability)) {
++                      if (caps->cpu_enable)
++                              caps->cpu_enable(caps);
++              } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
++                      pr_crit("CPU%d: Requires work around for %s, not detected"
++                                      " at boot time\n",
++                              smp_processor_id(),
++                              caps->desc ? : "an erratum");
++                      cpu_die_early();
++              }
++      }
++}
++
++static void update_cpu_errata_workarounds(void)
++{
++      update_cpu_capabilities(arm64_errata, "enabling workaround for");
++}
++
++static void __init enable_errata_workarounds(void)
++{
++      enable_cpu_capabilities(arm64_errata);
++}
++
++/*
+  * Run through the enabled system capabilities and enable() it on this CPU.
+  * The capabilities were decided based on the available CPUs at the boot time.
+  * Any new CPU should match the system wide status of the capability. If the
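
The verification loop moved above can be modelled in a few lines of
standalone C. The stand-in fields below approximate cpus_have_cap(),
caps->matches() and caps->cpu_enable() without any kernel
infrastructure, so this is a sketch of the control flow only.

/* verify_errata.c: toy model of the moved verification loop */
#include <stdbool.h>
#include <stdio.h>

struct erratum {
        const char *desc;
        bool enabled_at_boot;           /* stands in for cpus_have_cap() */
        bool (*matches_this_cpu)(void); /* matches(caps, SCOPE_LOCAL_CPU) */
        void (*cpu_enable)(void);
};

static bool affected(void) { return true; }
static void apply(void)    { puts("applying work-around"); }

static void verify_errata(const struct erratum *e)
{
        for (; e->desc; e++) {
                if (e->enabled_at_boot) {
                        if (e->cpu_enable)
                                e->cpu_enable();
                } else if (e->matches_this_cpu()) {
                        printf("CPU requires %s, not detected at boot\n",
                               e->desc);
                        /* the kernel calls cpu_die_early() here */
                }
        }
}

int main(void)
{
        const struct erratum table[] = {
                { "erratum A", true,  affected, apply },
                { "erratum B", false, affected, NULL  },
                { 0 },
        };

        verify_errata(table);
        return 0;
}
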
diff --git a/queue-4.14/arm64-capabilities-move-errata-work-around-check-on-boot-cpu.patch b/queue-4.14/arm64-capabilities-move-errata-work-around-check-on-boot-cpu.patch
new file mode 100644 (file)
index 0000000..5320c5f
--- /dev/null
@@ -0,0 +1,62 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:56 +0200
+Subject: arm64: capabilities: Move errata work around check on boot CPU
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-12-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 5e91107b06811f0ca147cebbedce53626c9c4443 ]
+
+We trigger the CPU errata work-around check on the boot CPU from
+smp_prepare_boot_cpu() to make sure that we run the checks only
+after the CPU feature infrastructure is initialised. While this
+is correct, we can also do this from init_cpu_features(), which
+initialises the infrastructure and is called only on the
+boot CPU. This helps to consolidate the CPU capability handling
+in cpufeature.c. No functional changes.
+
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |    5 +++++
+ arch/arm64/kernel/smp.c        |    6 ------
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -521,6 +521,11 @@ void __init init_cpu_features(struct cpu
+               init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+       }
++      /*
++       * Run the errata work around checks on the boot CPU, once we have
++       * initialised the cpu feature infrastructure.
++       */
++      update_cpu_errata_workarounds();
+ }
+ static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -449,12 +449,6 @@ void __init smp_prepare_boot_cpu(void)
+       jump_label_init();
+       cpuinfo_store_boot_cpu();
+       save_boot_cpu_run_el();
+-      /*
+-       * Run the errata work around checks on the boot CPU, once we have
+-       * initialised the cpu feature infrastructure from
+-       * cpuinfo_store_boot_cpu() above.
+-       */
+-      update_cpu_errata_workarounds();
+ }
+ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
diff --git a/queue-4.14/arm64-capabilities-prepare-for-fine-grained-capabilities.patch b/queue-4.14/arm64-capabilities-prepare-for-fine-grained-capabilities.patch
new file mode 100644 (file)
index 0000000..c1ccd48
--- /dev/null
@@ -0,0 +1,403 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:58 +0200
+Subject: arm64: capabilities: Prepare for fine grained capabilities
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-14-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 143ba05d867af34827faf99e0eed4de27106c7cb ]
+
+We use arm64_cpu_capabilities to represent CPU ELF HWCAPs exposed
+to the userspace and the CPU hwcaps used by the kernel, which
+include CPU features and CPU errata work arounds. Capabilities
+have some properties that decide how they should be treated:
+
+ 1) Detection, i.e. scope: A cap could be "detected" either:
+    - if it is present on at least one CPU (SCOPE_LOCAL_CPU)
+       Or
+    - if it is present on all the CPUs (SCOPE_SYSTEM)
+
+ 2) When is it enabled? - A cap is treated as "enabled" when the
+  system takes some action based on whether the capability is detected
+  or not, e.g. setting some control register or patching the kernel
+  code. Right now, we treat all caps as enabled at boot-time, after
+  all the CPUs are brought up by the kernel. But there are certain
+  caps which are enabled early during the boot (e.g. VHE, GIC_CPUIF
+  for NMI) and which the kernel starts using even before the secondary
+  CPUs are brought up. We would need a way to describe this for each
+  capability.
+
+ 3) Conflict on a late CPU - When a CPU is brought up, it is checked
+  against the caps that are known to be enabled on the system (via
+  verify_local_cpu_capabilities()). Based on the state of the capability
+  on the CPU vs. that of the system, we could have the following
+  combinations of conflict.
+
+       x-----------------------------x
+       | Type  | System   | Late CPU |
+       ------------------------------|
+       |  a    |   y      |    n     |
+       ------------------------------|
+       |  b    |   n      |    y     |
+       x-----------------------------x
+
+  Case (a) is not permitted for caps which are system features that
+  the system expects all the CPUs to have (e.g. VHE), while (a) is
+  ignored for all errata work arounds. However, there could be
+  exceptions to the plain filtering approach, e.g. KPTI is an optional
+  feature for a late CPU as long as the system already enables it.
+
+  Case (b) is not permitted for errata work arounds that require some
+  action which cannot be delayed, and we ignore (b) for features.
+  Here, yet again, KPTI is an exception: if a late CPU needs KPTI we
+  are too late to enable it (because we change the allocation of
+  ASIDs etc.).
+
+So this calls for much more fine-grained behavior for each capability.
+And if we define all the attributes to control their behavior
+properly, we may be able to use a single table for the CPU hwcaps
+(which cover errata and features, not the ELF HWCAPs). This is a
+preparatory step to get there. More bits would be added for the
+properties listed above.
+
+We are going to use a bit-mask to encode all the properties of a
+capability. This patch encodes the "SCOPE" of the capability.
+
+As such there is no change in how the capabilities are treated.
+
+Cc: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |  105 +++++++++++++++++++++++++++++++++---
+ arch/arm64/kernel/cpu_errata.c      |   12 ++--
+ arch/arm64/kernel/cpufeature.c      |   34 +++++------
+ 3 files changed, 122 insertions(+), 29 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -85,16 +85,104 @@ struct arm64_ftr_reg {
+ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
+-/* scope of capability check */
+-enum {
+-      SCOPE_SYSTEM,
+-      SCOPE_LOCAL_CPU,
+-};
++/*
++ * CPU capabilities:
++ *
++ * We use arm64_cpu_capabilities to represent system features, errata work
++ * arounds (both used internally by kernel and tracked in cpu_hwcaps) and
++ * ELF HWCAPs (which are exposed to user).
++ *
++ * To support systems with heterogeneous CPUs, we need to make sure that we
++ * detect the capabilities correctly on the system and take appropriate
++ * measures to ensure there are no incompatibilities.
++ *
++ * This comment tries to explain how we treat the capabilities.
++ * Each capability has the following list of attributes :
++ *
++ * 1) Scope of Detection : The system detects a given capability by
++ *    performing some checks at runtime. This could be, e.g, checking the
++ *    value of a field in CPU ID feature register or checking the cpu
++ *    model. The capability provides a call back ( @matches() ) to
++ *    perform the check. Scope defines how the checks should be performed.
++ *    There are two cases:
++ *
++ *     a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
++ *        matches. This implies, we have to run the check on all the
++ *        booting CPUs, until the system decides that state of the
++ *        capability is finalised. (See section 2 below)
++ *            Or
++ *     b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
++ *        matches. This implies, we run the check only once, when the
++ *        system decides to finalise the state of the capability. If the
++ *        capability relies on a field in one of the CPU ID feature
++ *        registers, we use the sanitised value of the register from the
++ *        CPU feature infrastructure to make the decision.
++ *
++ *    The process of detection is usually denoted by "update" capability
++ *    state in the code.
++ *
++ * 2) Finalise the state : The kernel should finalise the state of a
++ *    capability at some point during its execution and take necessary
++ *    actions if any. Usually, this is done, after all the boot-time
++ *    enabled CPUs are brought up by the kernel, so that it can make
++ *    better decision based on the available set of CPUs. However, there
++ *    are some special cases, where the action is taken during the early
++ *    boot by the primary boot CPU. (e.g, running the kernel at EL2 with
++ *    Virtualisation Host Extensions). The kernel usually disallows any
++ *    changes to the state of a capability once it finalises the capability
++ *    and takes any action, as it may be impossible to execute the actions
++ *    safely. A CPU brought up after a capability is "finalised" is
++ *    referred to as "Late CPU" w.r.t the capability. e.g, all secondary
++ *    CPUs are treated "late CPUs" for capabilities determined by the boot
++ *    CPU.
++ *
++ * 3) Verification: When a CPU is brought online (e.g, by user or by the
++ *    kernel), the kernel should make sure that it is safe to use the CPU,
++ *    by verifying that the CPU is compliant with the state of the
++ *    capabilities finalised already. This happens via :
++ *
++ *    secondary_start_kernel()-> check_local_cpu_capabilities()
++ *
++ *    As explained in (2) above, capabilities could be finalised at
++ *    different points in the execution. Each CPU is verified against the
++ *    "finalised" capabilities and if there is a conflict, the kernel takes
++ *    an action, based on the severity (e.g, a CPU could be prevented from
++ *    booting or cause a kernel panic). The CPU is allowed to "affect" the
++ *    state of the capability, if it has not been finalised already.
++ *
++ * 4) Action: As mentioned in (2), the kernel can take an action for each
++ *    detected capability, on all CPUs on the system. Appropriate actions
++ *    include, turning on an architectural feature, modifying the control
++ *    registers (e.g, SCTLR, TCR etc.) or patching the kernel via
++ *    alternatives. The kernel patching is batched and performed at later
++ *    point. The actions are always initiated only after the capability
++ *    is finalised. This is usally denoted by "enabling" the capability.
++ *    The actions are initiated as follows :
++ *    a) Action is triggered on all online CPUs, after the capability is
++ *    finalised, invoked within the stop_machine() context from
++ *    enable_cpu_capabilitie().
++ *
++ *    b) Any late CPU, brought up after (1), the action is triggered via:
++ *
++ *      check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
++ *
++ */
++
++
++/* Decide how the capability is detected. On a local CPU vs System wide */
++#define ARM64_CPUCAP_SCOPE_LOCAL_CPU          ((u16)BIT(0))
++#define ARM64_CPUCAP_SCOPE_SYSTEM             ((u16)BIT(1))
++#define ARM64_CPUCAP_SCOPE_MASK                       \
++      (ARM64_CPUCAP_SCOPE_SYSTEM      |       \
++       ARM64_CPUCAP_SCOPE_LOCAL_CPU)
++
++#define SCOPE_SYSTEM                          ARM64_CPUCAP_SCOPE_SYSTEM
++#define SCOPE_LOCAL_CPU                               ARM64_CPUCAP_SCOPE_LOCAL_CPU
+ struct arm64_cpu_capabilities {
+       const char *desc;
+       u16 capability;
+-      int def_scope;                  /* default scope */
++      u16 type;
+       bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
+       /*
+        * Take the appropriate actions to enable this capability for this CPU.
+@@ -119,6 +207,11 @@ struct arm64_cpu_capabilities {
+       };
+ };
++static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
++{
++      return cap->type & ARM64_CPUCAP_SCOPE_MASK;
++}
++
+ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+ extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+ extern struct static_key_false arm64_const_caps_ready;
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -406,14 +406,14 @@ static bool has_ssbd_mitigation(const st
+ #endif        /* CONFIG_ARM64_SSBD */
+ #define MIDR_RANGE(model, min, max) \
+-      .def_scope = SCOPE_LOCAL_CPU, \
++      .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU, \
+       .matches = is_affected_midr_range, \
+       .midr_model = model, \
+       .midr_range_min = min, \
+       .midr_range_max = max
+ #define MIDR_ALL_VERSIONS(model) \
+-      .def_scope = SCOPE_LOCAL_CPU, \
++      .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU, \
+       .matches = is_affected_midr_range, \
+       .midr_model = model, \
+       .midr_range_min = 0, \
+@@ -517,14 +517,14 @@ const struct arm64_cpu_capabilities arm6
+               .desc = "Mismatched cache line size",
+               .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
+               .matches = has_mismatched_cache_type,
+-              .def_scope = SCOPE_LOCAL_CPU,
++              .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU,
+               .cpu_enable = cpu_enable_trap_ctr_access,
+       },
+       {
+               .desc = "Mismatched cache type",
+               .capability = ARM64_MISMATCHED_CACHE_TYPE,
+               .matches = has_mismatched_cache_type,
+-              .def_scope = SCOPE_LOCAL_CPU,
++              .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU,
+               .cpu_enable = cpu_enable_trap_ctr_access,
+       },
+ #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+@@ -538,7 +538,7 @@ const struct arm64_cpu_capabilities arm6
+       {
+               .desc = "Qualcomm Technologies Kryo erratum 1003",
+               .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+-              .def_scope = SCOPE_LOCAL_CPU,
++              .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU,
+               .midr_model = MIDR_QCOM_KRYO,
+               .matches = is_kryo_midr,
+       },
+@@ -613,7 +613,7 @@ const struct arm64_cpu_capabilities arm6
+ #ifdef CONFIG_ARM64_SSBD
+       {
+               .desc = "Speculative Store Bypass Disable",
+-              .def_scope = SCOPE_LOCAL_CPU,
++              .type = ARM64_CPUCAP_SCOPE_LOCAL_CPU,
+               .capability = ARM64_SSBD,
+               .matches = has_ssbd_mitigation,
+       },
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -924,7 +924,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "GIC system register CPU interface",
+               .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = has_useable_gicv3_cpuif,
+               .sys_reg = SYS_ID_AA64PFR0_EL1,
+               .field_pos = ID_AA64PFR0_GIC_SHIFT,
+@@ -935,7 +935,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Privileged Access Never",
+               .capability = ARM64_HAS_PAN,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64MMFR1_EL1,
+               .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+@@ -948,7 +948,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "LSE atomic instructions",
+               .capability = ARM64_HAS_LSE_ATOMICS,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64ISAR0_EL1,
+               .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
+@@ -959,14 +959,14 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Software prefetching using PRFM",
+               .capability = ARM64_HAS_NO_HW_PREFETCH,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = has_no_hw_prefetch,
+       },
+ #ifdef CONFIG_ARM64_UAO
+       {
+               .desc = "User Access Override",
+               .capability = ARM64_HAS_UAO,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64MMFR2_EL1,
+               .field_pos = ID_AA64MMFR2_UAO_SHIFT,
+@@ -980,21 +980,21 @@ static const struct arm64_cpu_capabiliti
+ #ifdef CONFIG_ARM64_PAN
+       {
+               .capability = ARM64_ALT_PAN_NOT_UAO,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = cpufeature_pan_not_uao,
+       },
+ #endif /* CONFIG_ARM64_PAN */
+       {
+               .desc = "Virtualization Host Extensions",
+               .capability = ARM64_HAS_VIRT_HOST_EXTN,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = runs_at_el2,
+               .cpu_enable = cpu_copy_el2regs,
+       },
+       {
+               .desc = "32-bit EL0 Support",
+               .capability = ARM64_HAS_32BIT_EL0,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64PFR0_EL1,
+               .sign = FTR_UNSIGNED,
+@@ -1004,14 +1004,14 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Reduced HYP mapping offset",
+               .capability = ARM64_HYP_OFFSET_LOW,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = hyp_offset_low,
+       },
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       {
+               .desc = "Kernel page table isolation (KPTI)",
+               .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = unmap_kernel_at_el0,
+               .cpu_enable = kpti_install_ng_mappings,
+       },
+@@ -1019,7 +1019,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               /* FP/SIMD is not implemented */
+               .capability = ARM64_HAS_NO_FPSIMD,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .min_field_value = 0,
+               .matches = has_no_fpsimd,
+       },
+@@ -1027,7 +1027,7 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Data cache clean to Point of Persistence",
+               .capability = ARM64_HAS_DCPOP,
+-              .def_scope = SCOPE_SYSTEM,
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64ISAR1_EL1,
+               .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+@@ -1037,16 +1037,16 @@ static const struct arm64_cpu_capabiliti
+       {},
+ };
+-#define HWCAP_CAP(reg, field, s, min_value, type, cap)        \
++#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)    \
+       {                                                       \
+               .desc = #cap,                                   \
+-              .def_scope = SCOPE_SYSTEM,                      \
++              .type = ARM64_CPUCAP_SCOPE_SYSTEM,              \
+               .matches = has_cpuid_feature,                   \
+               .sys_reg = reg,                                 \
+               .field_pos = field,                             \
+               .sign = s,                                      \
+               .min_field_value = min_value,                   \
+-              .hwcap_type = type,                             \
++              .hwcap_type = cap_type,                         \
+               .hwcap = cap,                                   \
+       }
+@@ -1140,7 +1140,7 @@ static void __init setup_elf_hwcaps(cons
+       /* We support emulation of accesses to CPU ID feature registers */
+       elf_hwcap |= HWCAP_CPUID;
+       for (; hwcaps->matches; hwcaps++)
+-              if (hwcaps->matches(hwcaps, hwcaps->def_scope))
++              if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
+                       cap_set_elf_hwcap(hwcaps);
+ }
+@@ -1167,7 +1167,7 @@ static void update_cpu_capabilities(cons
+                                   const char *info)
+ {
+       for (; caps->matches; caps++) {
+-              if (!caps->matches(caps, caps->def_scope))
++              if (!caps->matches(caps, cpucap_default_scope(caps)))
+                       continue;
+               if (!cpus_have_cap(caps->capability) && caps->desc)
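
Since cap->type now mixes scope bits with future property bits, a
short sketch may help show why masking with the scope mask recovers
the default scope even once more flags are OR-ed in; the extra flag
bit below is hypothetical.

/* scope_bits.c: extracting the scope from a packed type field */
#include <stdint.h>
#include <stdio.h>

#define SCOPE_LOCAL_CPU (1u << 0)
#define SCOPE_SYSTEM    (1u << 1)
#define SCOPE_MASK      (SCOPE_LOCAL_CPU | SCOPE_SYSTEM)

struct cap { uint16_t type; };

static int default_scope(const struct cap *c)
{
        return c->type & SCOPE_MASK;    /* other property bits drop out */
}

int main(void)
{
        /* a system-scoped cap carrying a hypothetical extra flag */
        struct cap feature = { .type = SCOPE_SYSTEM | (1u << 4) };

        printf("system-scoped: %d\n",
               default_scope(&feature) == SCOPE_SYSTEM);
        return 0;
}
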
diff --git a/queue-4.14/arm64-capabilities-prepare-for-grouping-features-and-errata-work-arounds.patch b/queue-4.14/arm64-capabilities-prepare-for-grouping-features-and-errata-work-arounds.patch
new file mode 100644 (file)
index 0000000..1bf13c1
--- /dev/null
@@ -0,0 +1,131 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:02 +0200
+Subject: arm64: capabilities: Prepare for grouping features and errata work arounds
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-18-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 600b9c919c2f4d07a7bf67864086aa3432224674 ]
+
+We are about to group the handling of all capabilities (features
+and errata workarounds). This patch open-codes the wrapper routines
+to make it easier to merge the handling.
+
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   58 ++++++++++++-----------------------------
+ 1 file changed, 18 insertions(+), 40 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -485,7 +485,8 @@ static void __init init_cpu_ftr_reg(u32
+ }
+ extern const struct arm64_cpu_capabilities arm64_errata[];
+-static void update_cpu_errata_workarounds(void);
++static void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
++                                  u16 scope_mask, const char *info);
+ void __init init_cpu_features(struct cpuinfo_arm64 *info)
+ {
+@@ -528,7 +529,8 @@ void __init init_cpu_features(struct cpu
+        * Run the errata work around checks on the boot CPU, once we have
+        * initialised the cpu feature infrastructure.
+        */
+-      update_cpu_errata_workarounds();
++      update_cpu_capabilities(arm64_errata, SCOPE_ALL,
++                              "enabling workaround for");
+ }
+ static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
+@@ -1312,33 +1314,6 @@ verify_local_elf_hwcaps(const struct arm
+               }
+ }
+-static void verify_local_cpu_features(void)
+-{
+-      if (!__verify_local_cpu_caps(arm64_features, SCOPE_ALL))
+-              cpu_die_early();
+-}
+-
+-/*
+- * The CPU Errata work arounds are detected and applied at boot time
+- * and the related information is freed soon after. If the new CPU requires
+- * an errata not detected at boot, fail this CPU.
+- */
+-static void verify_local_cpu_errata_workarounds(void)
+-{
+-      if (!__verify_local_cpu_caps(arm64_errata, SCOPE_ALL))
+-              cpu_die_early();
+-}
+-
+-static void update_cpu_errata_workarounds(void)
+-{
+-      update_cpu_capabilities(arm64_errata, SCOPE_ALL,
+-                              "enabling workaround for");
+-}
+-
+-static void __init enable_errata_workarounds(void)
+-{
+-      enable_cpu_capabilities(arm64_errata, SCOPE_ALL);
+-}
+ /*
+  * Run through the enabled system capabilities and enable() it on this CPU.
+@@ -1350,8 +1325,15 @@ static void __init enable_errata_workaro
+  */
+ static void verify_local_cpu_capabilities(void)
+ {
+-      verify_local_cpu_errata_workarounds();
+-      verify_local_cpu_features();
++      /*
++       * The CPU Errata work arounds are detected and applied at boot time
++       * and the related information is freed soon after. If the new CPU
++       * requires an errata not detected at boot, fail this CPU.
++       */
++      if (!__verify_local_cpu_caps(arm64_errata, SCOPE_ALL))
++              cpu_die_early();
++      if (!__verify_local_cpu_caps(arm64_features, SCOPE_ALL))
++              cpu_die_early();
+       verify_local_elf_hwcaps(arm64_elf_hwcaps);
+       if (system_supports_32bit_el0())
+               verify_local_elf_hwcaps(compat_elf_hwcaps);
+@@ -1372,17 +1354,12 @@ void check_local_cpu_capabilities(void)
+        * advertised capabilities.
+        */
+       if (!sys_caps_initialised)
+-              update_cpu_errata_workarounds();
++              update_cpu_capabilities(arm64_errata, SCOPE_ALL,
++                                      "enabling workaround for");
+       else
+               verify_local_cpu_capabilities();
+ }
+-static void __init setup_feature_capabilities(void)
+-{
+-      update_cpu_capabilities(arm64_features, SCOPE_ALL, "detected:");
+-      enable_cpu_capabilities(arm64_features, SCOPE_ALL);
+-}
+-
+ DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+ EXPORT_SYMBOL(arm64_const_caps_ready);
+@@ -1405,8 +1382,9 @@ void __init setup_cpu_features(void)
+       int cls;
+       /* Set the CPU feature capabilies */
+-      setup_feature_capabilities();
+-      enable_errata_workarounds();
++      update_cpu_capabilities(arm64_features, SCOPE_ALL, "detected:");
++      enable_cpu_capabilities(arm64_features, SCOPE_ALL);
++      enable_cpu_capabilities(arm64_errata, SCOPE_ALL);
+       mark_const_caps_ready();
+       setup_elf_hwcaps(arm64_elf_hwcaps);
diff --git a/queue-4.14/arm64-capabilities-restrict-kpti-detection-to-boot-time-cpus.patch b/queue-4.14/arm64-capabilities-restrict-kpti-detection-to-boot-time-cpus.patch
new file mode 100644 (file)
index 0000000..ec00b4a
--- /dev/null
@@ -0,0 +1,92 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:07 +0200
+Subject: arm64: capabilities: Restrict KPTI detection to boot-time CPUs
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-23-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit d3aec8a28be3b88bf75442e7c24fd9da8d69a6df ]
+
+KPTI is treated as a system-wide feature and is only detected if all
+the CPUs in the system need the defense, unless it is forced via the
+kernel command line. This leaves a system with a mix of CPUs with and
+without the defense vulnerable. Also, if a late CPU needs KPTI but
+KPTI was not activated at boot time, the CPU is currently allowed to
+boot, which is a potential security vulnerability.
+This patch ensures that KPTI is turned on if at least one CPU detects
+the capability (i.e., change scope to SCOPE_LOCAL_CPU). It also
+rejects a late CPU that requires the defense when the system hasn't
+enabled it.
+
+Cc: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    9 +++++++++
+ arch/arm64/kernel/cpufeature.c      |   16 +++++++++++-----
+ 2 files changed, 20 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -244,6 +244,15 @@ extern struct arm64_ftr_reg arm64_ftr_re
+        ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU     |       \
+        ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
++/*
++ * CPU feature detected at boot time, on one or more CPUs. A late CPU
++ * is not allowed to have the capability when the system doesn't have it.
++ * It is Ok for a late CPU to miss the feature.
++ */
++#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE        \
++      (ARM64_CPUCAP_SCOPE_LOCAL_CPU           |       \
++       ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
++
+ struct arm64_cpu_capabilities {
+       const char *desc;
+       u16 capability;
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -824,10 +824,9 @@ static bool has_no_fpsimd(const struct a
+ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+-                              int __unused)
++                              int scope)
+ {
+       char const *str = "command line option";
+-      u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+       /*
+        * For reasons that aren't entirely clear, enabling KPTI on Cavium
+@@ -863,8 +862,7 @@ static bool unmap_kernel_at_el0(const st
+       }
+       /* Defer to CPU feature registers */
+-      return !cpuid_feature_extract_unsigned_field(pfr0,
+-                                                   ID_AA64PFR0_CSV3_SHIFT);
++      return !has_cpuid_feature(entry, scope);
+ }
+ static void
+@@ -1011,7 +1009,15 @@ static const struct arm64_cpu_capabiliti
+       {
+               .desc = "Kernel page table isolation (KPTI)",
+               .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+-              .type = ARM64_CPUCAP_SYSTEM_FEATURE,
++              .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
++              /*
++               * The ID feature fields below are used to indicate that
++               * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
++               * more details.
++               */
++              .sys_reg = SYS_ID_AA64PFR0_EL1,
++              .field_pos = ID_AA64PFR0_CSV3_SHIFT,
++              .min_field_value = 1,
+               .matches = unmap_kernel_at_el0,
+               .cpu_enable = kpti_install_ng_mappings,
+       },
diff --git a/queue-4.14/arm64-capabilities-split-the-processing-of-errata-work-arounds.patch b/queue-4.14/arm64-capabilities-split-the-processing-of-errata-work-arounds.patch
new file mode 100644 (file)
index 0000000..536500f
--- /dev/null
@@ -0,0 +1,59 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:03 +0200
+Subject: arm64: capabilities: Split the processing of errata work arounds
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-19-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit d69fe9a7e7214d49fe157ec20889892388d0fe23 ]
+
+Right now we run through the errata workaround checks on all boot-time
+active CPUs, with SCOPE_ALL. This doesn't help with detecting errata
+workarounds with a SYSTEM_SCOPE. There are none yet, but we plan to
+introduce some: let us clean this up so that such workarounds can be
+detected and enabled correctly.
+
+So, we now run the checks with SCOPE_LOCAL_CPU on all CPUs, and the
+SCOPE_SYSTEM checks are run only once, after all the boot-time CPUs
+are active.
+
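+A compile-and-run sketch of the resulting flow (illustrative only; the
+scope names are stand-ins for the kernel's scope flags):
+
+  #include <stdio.h>
+
+  enum scope { SCOPE_LOCAL_CPU, SCOPE_SYSTEM };
+
+  static void update_caps(int cpu, enum scope s)
+  {
+          printf("cpu %d: errata check, scope %d\n", cpu, s);
+  }
+
+  int main(void)
+  {
+          /* Every booting CPU runs only the local-scope checks... */
+          for (int cpu = 0; cpu < 4; cpu++)
+                  update_caps(cpu, SCOPE_LOCAL_CPU);
+          /* ...and the system-scope pass happens once, afterwards,
+           * as setup_cpu_features() does in the hunk below. */
+          update_caps(-1, SCOPE_SYSTEM);
+          return 0;
+  }
+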
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -529,7 +529,7 @@ void __init init_cpu_features(struct cpu
+        * Run the errata work around checks on the boot CPU, once we have
+        * initialised the cpu feature infrastructure.
+        */
+-      update_cpu_capabilities(arm64_errata, SCOPE_ALL,
++      update_cpu_capabilities(arm64_errata, SCOPE_LOCAL_CPU,
+                               "enabling workaround for");
+ }
+@@ -1354,7 +1354,7 @@ void check_local_cpu_capabilities(void)
+        * advertised capabilities.
+        */
+       if (!sys_caps_initialised)
+-              update_cpu_capabilities(arm64_errata, SCOPE_ALL,
++              update_cpu_capabilities(arm64_errata, SCOPE_LOCAL_CPU,
+                                       "enabling workaround for");
+       else
+               verify_local_cpu_capabilities();
+@@ -1383,6 +1383,8 @@ void __init setup_cpu_features(void)
+       /* Set the CPU feature capabilies */
+       update_cpu_capabilities(arm64_features, SCOPE_ALL, "detected:");
++      update_cpu_capabilities(arm64_errata, SCOPE_SYSTEM,
++                              "enabling workaround for");
+       enable_cpu_capabilities(arm64_features, SCOPE_ALL);
+       enable_cpu_capabilities(arm64_errata, SCOPE_ALL);
+       mark_const_caps_ready();
diff --git a/queue-4.14/arm64-capabilities-unify-the-verification.patch b/queue-4.14/arm64-capabilities-unify-the-verification.patch
new file mode 100644 (file)
index 0000000..191269d
--- /dev/null
@@ -0,0 +1,147 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:00 +0200
+Subject: arm64: capabilities: Unify the verification
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-16-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit eaac4d83daa50fc1b9b7850346e9a62adfd4647e ]
+
+Now that each capability describes how to treat a conflict between the
+CPU's capability state and the system-wide capability state, we can
+unify the verification logic in a single place.
+
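+The unified rule can be paraphrased as a small decision function (a
+model only; see __verify_local_cpu_caps() in the hunk below for the
+real logic):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* Returns true when a late CPU conflicts with the established
+   * system-wide state for one capability. */
+  static bool conflicts(bool system_has, bool cpu_has,
+                        bool late_optional, bool late_permitted)
+  {
+          if (system_has)
+                  return !cpu_has && !late_optional;
+          return cpu_has && !late_permitted;
+  }
+
+  int main(void)
+  {
+          /* Missing a mandatory system-wide cap: conflict (prints 1). */
+          printf("%d\n", conflicts(true, false, false, true));
+          /* Having a cap the system lacks, without permission: 1. */
+          printf("%d\n", conflicts(false, true, true, false));
+          return 0;
+  }
+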
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |   91 ++++++++++++++++++++++++++---------------
+ 1 file changed, 58 insertions(+), 33 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1229,6 +1229,58 @@ static inline void set_sys_caps_initiali
+ }
+ /*
++ * Run through the list of capabilities to check for conflicts.
++ * If the system has already detected a capability, take necessary
++ * action on this CPU.
++ *
++ * Returns "false" on conflicts.
++ */
++static bool
++__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps_list)
++{
++      bool cpu_has_cap, system_has_cap;
++      const struct arm64_cpu_capabilities *caps;
++
++      for (caps = caps_list; caps->matches; caps++) {
++              cpu_has_cap = __this_cpu_has_cap(caps_list, caps->capability);
++              system_has_cap = cpus_have_cap(caps->capability);
++
++              if (system_has_cap) {
++                      /*
++                       * Check if the new CPU misses an advertised feature,
++                       * which is not safe to miss.
++                       */
++                      if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
++                              break;
++                      /*
++                       * We have to issue cpu_enable() irrespective of
++                       * whether the CPU has it or not, as it is enabled
++                       * system wide. It is up to the call back to take
++                       * appropriate action on this CPU.
++                       */
++                      if (caps->cpu_enable)
++                              caps->cpu_enable(caps);
++              } else {
++                      /*
++                       * Check if the CPU has this capability if it isn't
++                       * safe to have when the system doesn't.
++                       */
++                      if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
++                              break;
++              }
++      }
++
++      if (caps->matches) {
++              pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
++                      smp_processor_id(), caps->capability,
++                      caps->desc, system_has_cap, cpu_has_cap);
++              return false;
++      }
++
++      return true;
++}
++
++/*
+  * Check for CPU features that are used in early boot
+  * based on the Boot CPU value.
+  */
+@@ -1250,25 +1302,10 @@ verify_local_elf_hwcaps(const struct arm
+               }
+ }
+-static void
+-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
++static void verify_local_cpu_features(void)
+ {
+-      const struct arm64_cpu_capabilities *caps = caps_list;
+-      for (; caps->matches; caps++) {
+-              if (!cpus_have_cap(caps->capability))
+-                      continue;
+-              /*
+-               * If the new CPU misses an advertised feature, we cannot proceed
+-               * further, park the cpu.
+-               */
+-              if (!__this_cpu_has_cap(caps_list, caps->capability)) {
+-                      pr_crit("CPU%d: missing feature: %s\n",
+-                                      smp_processor_id(), caps->desc);
+-                      cpu_die_early();
+-              }
+-              if (caps->cpu_enable)
+-                      caps->cpu_enable(caps);
+-      }
++      if (!__verify_local_cpu_caps(arm64_features))
++              cpu_die_early();
+ }
+ /*
+@@ -1278,20 +1315,8 @@ verify_local_cpu_features(const struct a
+  */
+ static void verify_local_cpu_errata_workarounds(void)
+ {
+-      const struct arm64_cpu_capabilities *caps = arm64_errata;
+-
+-      for (; caps->matches; caps++) {
+-              if (cpus_have_cap(caps->capability)) {
+-                      if (caps->cpu_enable)
+-                              caps->cpu_enable(caps);
+-              } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
+-                      pr_crit("CPU%d: Requires work around for %s, not detected"
+-                                      " at boot time\n",
+-                              smp_processor_id(),
+-                              caps->desc ? : "an erratum");
+-                      cpu_die_early();
+-              }
+-      }
++      if (!__verify_local_cpu_caps(arm64_errata))
++              cpu_die_early();
+ }
+ static void update_cpu_errata_workarounds(void)
+@@ -1315,7 +1340,7 @@ static void __init enable_errata_workaro
+ static void verify_local_cpu_capabilities(void)
+ {
+       verify_local_cpu_errata_workarounds();
+-      verify_local_cpu_features(arm64_features);
++      verify_local_cpu_features();
+       verify_local_elf_hwcaps(arm64_elf_hwcaps);
+       if (system_supports_32bit_el0())
+               verify_local_elf_hwcaps(compat_elf_hwcaps);
diff --git a/queue-4.14/arm64-capabilities-update-prototype-for-enable-call-back.patch b/queue-4.14/arm64-capabilities-update-prototype-for-enable-call-back.patch
new file mode 100644 (file)
index 0000000..395710f
--- /dev/null
@@ -0,0 +1,449 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:55 +0200
+Subject: arm64: capabilities: Update prototype for enable call back
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Will Deacon <will.deacon@arm.com>, James Morse <james.morse@arm.com>, Robin Murphy <robin.murphy@arm.com>, Julien Thierry <julien.thierry@arm.com>
+Message-ID: <20191024124833.4158-11-ard.biesheuvel@linaro.org>
+
+From: Dave Martin <dave.martin@arm.com>
+
+[ Upstream commit c0cda3b8ee6b4b6851b2fd8b6db91fd7b0e2524a ]
+
+We issue the enable() call back for all CPU hwcaps capabilities
+available on the system, on all the CPUs. So far we have ignored
+the argument passed to the call back, which had a prototype to
+accept a "void *" for use with on_each_cpu() and later with
+stop_machine(). However, with commit 0a0d111d40fd1
+("arm64: cpufeature: Pass capability structure to ->enable callback"),
+there are some users of the argument who want the matching capability
+struct pointer where there are multiple matching criteria for a single
+capability. Clean up the declaration of the callback to make this clear.
+
+ 1) Renamed to cpu_enable(), to imply taking the necessary actions on
+    the called CPU for the entry.
+ 2) Pass a const pointer to the capability, to allow the callback to
+    check the entry (e.g., to check if any action is needed on the CPU).
+ 3) We don't care about the result of the callback, so turn the return
+    type into void.
+
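+A minimal standalone model of the new shape (the structure and names
+here are simplified stand-ins; the real definitions are in the hunks
+below):
+
+  #include <stdio.h>
+
+  struct capability {
+          const char *desc;
+          /* was: int (*enable)(void *); now takes the entry, returns void */
+          void (*cpu_enable)(const struct capability *cap);
+  };
+
+  static void enable_example(const struct capability *cap)
+  {
+          printf("enabling %s on this CPU\n", cap->desc);
+  }
+
+  int main(void)
+  {
+          struct capability c = { "example feature", enable_example };
+          c.cpu_enable(&c);
+          return 0;
+  }
+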
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Andre Przywara <andre.przywara@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Acked-by: Robin Murphy <robin.murphy@arm.com>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Dave Martin <dave.martin@arm.com>
+[suzuki: convert more users, rename call back and drop results]
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpufeature.h |    7 +++-
+ arch/arm64/include/asm/processor.h  |    5 +--
+ arch/arm64/kernel/cpu_errata.c      |   55 +++++++++++++++++-------------------
+ arch/arm64/kernel/cpufeature.c      |   34 +++++++++++++---------
+ arch/arm64/kernel/fpsimd.c          |    1 
+ arch/arm64/kernel/traps.c           |    4 +-
+ arch/arm64/mm/fault.c               |    3 -
+ 7 files changed, 60 insertions(+), 49 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -96,7 +96,12 @@ struct arm64_cpu_capabilities {
+       u16 capability;
+       int def_scope;                  /* default scope */
+       bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
+-      int (*enable)(void *);          /* Called on all active CPUs */
++      /*
++       * Take the appropriate actions to enable this capability for this CPU.
++       * For each successfully booted CPU, this method is called for each
++       * globally detected capability.
++       */
++      void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
+       union {
+               struct {        /* To be used for erratum handling only */
+                       u32 midr_model;
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -37,6 +37,7 @@
+ #include <linux/string.h>
+ #include <asm/alternative.h>
++#include <asm/cpufeature.h>
+ #include <asm/fpsimd.h>
+ #include <asm/hw_breakpoint.h>
+ #include <asm/lse.h>
+@@ -222,8 +223,8 @@ static inline void spin_lock_prefetch(co
+ #endif
+-int cpu_enable_pan(void *__unused);
+-int cpu_enable_cache_maint_trap(void *__unused);
++void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
++void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ASM_PROCESSOR_H */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -61,11 +61,11 @@ has_mismatched_cache_type(const struct a
+              (arm64_ftr_reg_ctrel0.sys_val & mask);
+ }
+-static int cpu_enable_trap_ctr_access(void *__unused)
++static void
++cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
+ {
+       /* Clear SCTLR_EL1.UCT */
+       config_sctlr_el1(SCTLR_EL1_UCT, 0);
+-      return 0;
+ }
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+@@ -169,25 +169,25 @@ static void call_hvc_arch_workaround_1(v
+       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+ }
+-static int enable_smccc_arch_workaround_1(void *data)
++static void
++enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+ {
+-      const struct arm64_cpu_capabilities *entry = data;
+       bp_hardening_cb_t cb;
+       void *smccc_start, *smccc_end;
+       struct arm_smccc_res res;
+       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+-              return 0;
++              return;
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+-              return 0;
++              return;
+       switch (psci_ops.conduit) {
+       case PSCI_CONDUIT_HVC:
+               arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+               if ((int)res.a0 < 0)
+-                      return 0;
++                      return;
+               cb = call_hvc_arch_workaround_1;
+               smccc_start = __smccc_workaround_1_hvc_start;
+               smccc_end = __smccc_workaround_1_hvc_end;
+@@ -197,19 +197,19 @@ static int enable_smccc_arch_workaround_
+               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+               if ((int)res.a0 < 0)
+-                      return 0;
++                      return;
+               cb = call_smc_arch_workaround_1;
+               smccc_start = __smccc_workaround_1_smc_start;
+               smccc_end = __smccc_workaround_1_smc_end;
+               break;
+       default:
+-              return 0;
++              return;
+       }
+       install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+-      return 0;
++      return;
+ }
+ static void qcom_link_stack_sanitization(void)
+@@ -224,15 +224,12 @@ static void qcom_link_stack_sanitization
+                    : "=&r" (tmp));
+ }
+-static int qcom_enable_link_stack_sanitization(void *data)
++static void
++qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
+ {
+-      const struct arm64_cpu_capabilities *entry = data;
+-
+       install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
+                               __qcom_hyp_sanitize_link_stack_start,
+                               __qcom_hyp_sanitize_link_stack_end);
+-
+-      return 0;
+ }
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+@@ -431,7 +428,7 @@ const struct arm64_cpu_capabilities arm6
+               .desc = "ARM errata 826319, 827319, 824069",
+               .capability = ARM64_WORKAROUND_CLEAN_CACHE,
+               MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
+-              .enable = cpu_enable_cache_maint_trap,
++              .cpu_enable = cpu_enable_cache_maint_trap,
+       },
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_819472
+@@ -440,7 +437,7 @@ const struct arm64_cpu_capabilities arm6
+               .desc = "ARM errata 819472",
+               .capability = ARM64_WORKAROUND_CLEAN_CACHE,
+               MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
+-              .enable = cpu_enable_cache_maint_trap,
++              .cpu_enable = cpu_enable_cache_maint_trap,
+       },
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_832075
+@@ -521,14 +518,14 @@ const struct arm64_cpu_capabilities arm6
+               .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
+               .matches = has_mismatched_cache_type,
+               .def_scope = SCOPE_LOCAL_CPU,
+-              .enable = cpu_enable_trap_ctr_access,
++              .cpu_enable = cpu_enable_trap_ctr_access,
+       },
+       {
+               .desc = "Mismatched cache type",
+               .capability = ARM64_MISMATCHED_CACHE_TYPE,
+               .matches = has_mismatched_cache_type,
+               .def_scope = SCOPE_LOCAL_CPU,
+-              .enable = cpu_enable_trap_ctr_access,
++              .cpu_enable = cpu_enable_trap_ctr_access,
+       },
+ #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+       {
+@@ -567,27 +564,27 @@ const struct arm64_cpu_capabilities arm6
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+-              .enable = enable_smccc_arch_workaround_1,
++              .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+-              .enable = enable_smccc_arch_workaround_1,
++              .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+-              .enable = enable_smccc_arch_workaround_1,
++              .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+-              .enable = enable_smccc_arch_workaround_1,
++              .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+-              .enable = qcom_enable_link_stack_sanitization,
++              .cpu_enable = qcom_enable_link_stack_sanitization,
+       },
+       {
+               .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+@@ -596,7 +593,7 @@ const struct arm64_cpu_capabilities arm6
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+-              .enable = qcom_enable_link_stack_sanitization,
++              .cpu_enable = qcom_enable_link_stack_sanitization,
+       },
+       {
+               .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+@@ -605,12 +602,12 @@ const struct arm64_cpu_capabilities arm6
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+-              .enable = enable_smccc_arch_workaround_1,
++              .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+-              .enable = enable_smccc_arch_workaround_1,
++              .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+ #endif
+ #ifdef CONFIG_ARM64_SSBD
+@@ -636,8 +633,8 @@ void verify_local_cpu_errata_workarounds
+       for (; caps->matches; caps++) {
+               if (cpus_have_cap(caps->capability)) {
+-                      if (caps->enable)
+-                              caps->enable((void *)caps);
++                      if (caps->cpu_enable)
++                              caps->cpu_enable(caps);
+               } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
+                       pr_crit("CPU%d: Requires work around for %s, not detected"
+                                       " at boot time\n",
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -859,7 +859,8 @@ static bool unmap_kernel_at_el0(const st
+                                                    ID_AA64PFR0_CSV3_SHIFT);
+ }
+-static int kpti_install_ng_mappings(void *__unused)
++static void
++kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+ {
+       typedef void (kpti_remap_fn)(int, int, phys_addr_t);
+       extern kpti_remap_fn idmap_kpti_install_ng_mappings;
+@@ -869,7 +870,7 @@ static int kpti_install_ng_mappings(void
+       int cpu = smp_processor_id();
+       if (kpti_applied)
+-              return 0;
++              return;
+       remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+@@ -880,7 +881,7 @@ static int kpti_install_ng_mappings(void
+       if (!cpu)
+               kpti_applied = true;
+-      return 0;
++      return;
+ }
+ static int __init parse_kpti(char *str)
+@@ -897,7 +898,7 @@ static int __init parse_kpti(char *str)
+ early_param("kpti", parse_kpti);
+ #endif        /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+-static int cpu_copy_el2regs(void *__unused)
++static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
+ {
+       /*
+        * Copy register values that aren't redirected by hardware.
+@@ -909,8 +910,6 @@ static int cpu_copy_el2regs(void *__unus
+        */
+       if (!alternatives_applied)
+               write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+-
+-      return 0;
+ }
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+@@ -934,7 +933,7 @@ static const struct arm64_cpu_capabiliti
+               .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+               .sign = FTR_UNSIGNED,
+               .min_field_value = 1,
+-              .enable = cpu_enable_pan,
++              .cpu_enable = cpu_enable_pan,
+       },
+ #endif /* CONFIG_ARM64_PAN */
+ #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+@@ -982,7 +981,7 @@ static const struct arm64_cpu_capabiliti
+               .capability = ARM64_HAS_VIRT_HOST_EXTN,
+               .def_scope = SCOPE_SYSTEM,
+               .matches = runs_at_el2,
+-              .enable = cpu_copy_el2regs,
++              .cpu_enable = cpu_copy_el2regs,
+       },
+       {
+               .desc = "32-bit EL0 Support",
+@@ -1006,7 +1005,7 @@ static const struct arm64_cpu_capabiliti
+               .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+               .def_scope = SCOPE_SYSTEM,
+               .matches = unmap_kernel_at_el0,
+-              .enable = kpti_install_ng_mappings,
++              .cpu_enable = kpti_install_ng_mappings,
+       },
+ #endif
+       {
+@@ -1169,6 +1168,14 @@ void update_cpu_capabilities(const struc
+       }
+ }
++static int __enable_cpu_capability(void *arg)
++{
++      const struct arm64_cpu_capabilities *cap = arg;
++
++      cap->cpu_enable(cap);
++      return 0;
++}
++
+ /*
+  * Run through the enabled capabilities and enable() it on all active
+  * CPUs
+@@ -1184,14 +1191,15 @@ void __init enable_cpu_capabilities(cons
+               /* Ensure cpus_have_const_cap(num) works */
+               static_branch_enable(&cpu_hwcap_keys[num]);
+-              if (caps->enable) {
++              if (caps->cpu_enable) {
+                       /*
+                        * Use stop_machine() as it schedules the work allowing
+                        * us to modify PSTATE, instead of on_each_cpu() which
+                        * uses an IPI, giving us a PSTATE that disappears when
+                        * we return.
+                        */
+-                      stop_machine(caps->enable, (void *)caps, cpu_online_mask);
++                      stop_machine(__enable_cpu_capability, (void *)caps,
++                                   cpu_online_mask);
+               }
+       }
+ }
+@@ -1249,8 +1257,8 @@ verify_local_cpu_features(const struct a
+                                       smp_processor_id(), caps->desc);
+                       cpu_die_early();
+               }
+-              if (caps->enable)
+-                      caps->enable((void *)caps);
++              if (caps->cpu_enable)
++                      caps->cpu_enable(caps);
+       }
+ }
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -28,6 +28,7 @@
+ #include <linux/signal.h>
+ #include <asm/fpsimd.h>
++#include <asm/cpufeature.h>
+ #include <asm/cputype.h>
+ #include <asm/simd.h>
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -38,6 +38,7 @@
+ #include <asm/atomic.h>
+ #include <asm/bug.h>
++#include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/esr.h>
+ #include <asm/insn.h>
+@@ -436,10 +437,9 @@ asmlinkage void __exception do_undefinst
+       force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ }
+-int cpu_enable_cache_maint_trap(void *__unused)
++void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+ {
+       config_sctlr_el1(SCTLR_EL1_UCI, 0);
+-      return 0;
+ }
+ #define __user_cache_maint(insn, address, res)                        \
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -875,7 +875,7 @@ asmlinkage int __exception do_debug_exce
+ NOKPROBE_SYMBOL(do_debug_exception);
+ #ifdef CONFIG_ARM64_PAN
+-int cpu_enable_pan(void *__unused)
++void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
+ {
+       /*
+        * We modify PSTATE. This won't work from irq context as the PSTATE
+@@ -885,6 +885,5 @@ int cpu_enable_pan(void *__unused)
+       config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+       asm(SET_PSTATE_PAN(1));
+-      return 0;
+ }
+ #endif /* CONFIG_ARM64_PAN */
diff --git a/queue-4.14/arm64-cpufeature-detect-ssbs-and-advertise-to-userspace.patch b/queue-4.14/arm64-cpufeature-detect-ssbs-and-advertise-to-userspace.patch
new file mode 100644 (file)
index 0000000..658ea71
--- /dev/null
@@ -0,0 +1,182 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:17 +0200
+Subject: arm64: cpufeature: Detect SSBS and advertise to userspace
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-33-ard.biesheuvel@linaro.org>
+
+From: Will Deacon <will.deacon@arm.com>
+
+[ Upstream commit d71be2b6c0e19180b5f80a6d42039cc074a693a2 ]
+
+Armv8.5 introduces a new PSTATE bit known as Speculative Store Bypass
+Safe (SSBS) which can be used as a mitigation against Spectre variant 4.
+
+Additionally, a CPU may provide instructions to manipulate PSTATE.SSBS
+directly, so that userspace can toggle the SSBS control without trapping
+to the kernel.
+
+This patch probes for the existence of SSBS and advertises the new
+instructions to userspace if they exist.
+
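+For illustration only, the 4-bit SSBS field sits at bits [7:4] of
+ID_AA64PFR1_EL1 (values as defined in the hunk below); a trivial
+userspace model of the probe:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define SSBS_SHIFT      4       /* ID_AA64PFR1_SSBS_SHIFT */
+
+  /* 0: not implemented, 1: PSTATE.SSBS only, 2: plus MSR/MRS insns */
+  static unsigned int ssbs_field(uint64_t pfr1)
+  {
+          return (pfr1 >> SSBS_SHIFT) & 0xf;
+  }
+
+  int main(void)
+  {
+          printf("%u\n", ssbs_field(0x20));       /* prints 2 */
+          return 0;
+  }
+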
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpucaps.h    |    3 ++-
+ arch/arm64/include/asm/sysreg.h     |   16 ++++++++++++----
+ arch/arm64/include/uapi/asm/hwcap.h |    1 +
+ arch/arm64/kernel/cpufeature.c      |   19 +++++++++++++++++--
+ arch/arm64/kernel/cpuinfo.c         |    1 +
+ 5 files changed, 33 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -44,7 +44,8 @@
+ #define ARM64_HARDEN_BRANCH_PREDICTOR         24
+ #define ARM64_SSBD                            25
+ #define ARM64_MISMATCHED_CACHE_TYPE           26
++#define ARM64_SSBS                            27
+-#define ARM64_NCAPS                           27
++#define ARM64_NCAPS                           28
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -297,6 +297,7 @@
+ #define SYS_ICH_LR15_EL2              __SYS__LR8_EL2(7)
+ /* Common SCTLR_ELx flags. */
++#define SCTLR_ELx_DSSBS       (1UL << 44)
+ #define SCTLR_ELx_EE    (1 << 25)
+ #define SCTLR_ELx_WXN (1 << 19)
+ #define SCTLR_ELx_I   (1 << 12)
+@@ -316,7 +317,7 @@
+                        (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
+                        (1 << 17) | (1 << 20) | (1 << 21) | (1 << 24) | \
+                        (1 << 26) | (1 << 27) | (1 << 30) | (1 << 31) | \
+-                       (0xffffffffUL << 32))
++                       (0xffffefffUL << 32))
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL2                SCTLR_ELx_EE
+@@ -330,7 +331,7 @@
+ #define SCTLR_EL2_SET (ENDIAN_SET_EL2   | SCTLR_EL2_RES1)
+ #define SCTLR_EL2_CLEAR       (SCTLR_ELx_M      | SCTLR_ELx_A    | SCTLR_ELx_C   | \
+                        SCTLR_ELx_SA     | SCTLR_ELx_I    | SCTLR_ELx_WXN | \
+-                       ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
++                       SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+ #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+ #error "Inconsistent SCTLR_EL2 set/clear bits"
+@@ -354,7 +355,7 @@
+                        (1 << 29))
+ #define SCTLR_EL1_RES0  ((1 << 6)  | (1 << 10) | (1 << 13) | (1 << 17) | \
+                        (1 << 21) | (1 << 27) | (1 << 30) | (1 << 31) | \
+-                       (0xffffffffUL << 32))
++                       (0xffffefffUL << 32))
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL1                (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+@@ -371,7 +372,7 @@
+                        SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
+ #define SCTLR_EL1_CLEAR       (SCTLR_ELx_A   | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD    |\
+                        SCTLR_EL1_UMA | SCTLR_ELx_WXN     | ENDIAN_CLEAR_EL1 |\
+-                       SCTLR_EL1_RES0)
++                       SCTLR_ELx_DSSBS | SCTLR_EL1_RES0)
+ #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+ #error "Inconsistent SCTLR_EL1 set/clear bits"
+@@ -417,6 +418,13 @@
+ #define ID_AA64PFR0_EL0_64BIT_ONLY    0x1
+ #define ID_AA64PFR0_EL0_32BIT_64BIT   0x2
++/* id_aa64pfr1 */
++#define ID_AA64PFR1_SSBS_SHIFT                4
++
++#define ID_AA64PFR1_SSBS_PSTATE_NI    0
++#define ID_AA64PFR1_SSBS_PSTATE_ONLY  1
++#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
++
+ /* id_aa64mmfr0 */
+ #define ID_AA64MMFR0_TGRAN4_SHIFT     28
+ #define ID_AA64MMFR0_TGRAN64_SHIFT    24
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -48,5 +48,6 @@
+ #define HWCAP_USCAT           (1 << 25)
+ #define HWCAP_ILRCPC          (1 << 26)
+ #define HWCAP_FLAGM           (1 << 27)
++#define HWCAP_SSBS            (1 << 28)
+ #endif /* _UAPI__ASM_HWCAP_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -145,6 +145,11 @@ static const struct arm64_ftr_bits ftr_i
+       ARM64_FTR_END,
+ };
++static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
++      ARM64_FTR_END,
++};
++
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+@@ -345,7 +350,7 @@ static const struct __ftr_reg_entry {
+       /* Op1 = 0, CRn = 0, CRm = 4 */
+       ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
+-      ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
++      ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
+       /* Op1 = 0, CRn = 0, CRm = 5 */
+       ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
+@@ -625,7 +630,6 @@ void update_cpu_features(int cpu,
+       /*
+        * EL3 is not our concern.
+-       * ID_AA64PFR1 is currently RES0.
+        */
+       taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+                                     info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+@@ -1045,6 +1049,16 @@ static const struct arm64_cpu_capabiliti
+               .min_field_value = 1,
+       },
+ #endif
++      {
++              .desc = "Speculative Store Bypassing Safe (SSBS)",
++              .capability = ARM64_SSBS,
++              .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
++              .matches = has_cpuid_feature,
++              .sys_reg = SYS_ID_AA64PFR1_EL1,
++              .field_pos = ID_AA64PFR1_SSBS_SHIFT,
++              .sign = FTR_UNSIGNED,
++              .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
++      },
+       {},
+ };
+@@ -1087,6 +1101,7 @@ static const struct arm64_cpu_capabiliti
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
+       HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
++      HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
+       {},
+ };
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -80,6 +80,7 @@ static const char *const hwcap_str[] = {
+       "uscat",
+       "ilrcpc",
+       "flagm",
++      "ssbs",
+       NULL
+ };
diff --git a/queue-4.14/arm64-documentation-cpu-feature-registers-remove-res0-fields.patch b/queue-4.14/arm64-documentation-cpu-feature-registers-remove-res0-fields.patch
new file mode 100644 (file)
index 0000000..b99c271
--- /dev/null
@@ -0,0 +1,71 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:50 +0200
+Subject: arm64: Documentation: cpu-feature-registers: Remove RES0 fields
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-6-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 847ecd3fa311cde0f10a1b66c572abb136742b1d ]
+
+Remove the invisible RES0 field entries from the tables listing
+the fields in the CPU ID feature registers, as:
+ 1) We are only interested in the user-visible fields.
+ 2) The field description may not be up to date, as the
+    field could be assigned a new meaning.
+ 3) We already explain the rules for the fields which are not
+    visible.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+[ardb: fix up for missing SVE in context]
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/cpu-feature-registers.txt |    8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/Documentation/arm64/cpu-feature-registers.txt
++++ b/Documentation/arm64/cpu-feature-registers.txt
+@@ -110,7 +110,6 @@ infrastructure:
+      x--------------------------------------------------x
+      | Name                         |  bits   | visible |
+      |--------------------------------------------------|
+-     | RES0                         | [63-52] |    n    |
+      |--------------------------------------------------|
+      | FHM                          | [51-48] |    y    |
+      |--------------------------------------------------|
+@@ -124,8 +123,6 @@ infrastructure:
+      |--------------------------------------------------|
+      | RDM                          | [31-28] |    y    |
+      |--------------------------------------------------|
+-     | RES0                         | [27-24] |    n    |
+-     |--------------------------------------------------|
+      | ATOMICS                      | [23-20] |    y    |
+      |--------------------------------------------------|
+      | CRC32                        | [19-16] |    y    |
+@@ -135,8 +132,6 @@ infrastructure:
+      | SHA1                         | [11-8]  |    y    |
+      |--------------------------------------------------|
+      | AES                          | [7-4]   |    y    |
+-     |--------------------------------------------------|
+-     | RES0                         | [3-0]   |    n    |
+      x--------------------------------------------------x
+@@ -144,7 +139,8 @@ infrastructure:
+      x--------------------------------------------------x
+      | Name                         |  bits   | visible |
+      |--------------------------------------------------|
+-     | RES0                         | [63-28] |    n    |
++     |--------------------------------------------------|
++     | SVE                          | [35-32] |    y    |
+      |--------------------------------------------------|
+      | GIC                          | [27-24] |    n    |
+      |--------------------------------------------------|
diff --git a/queue-4.14/arm64-don-t-zero-dit-on-signal-return.patch b/queue-4.14/arm64-don-t-zero-dit-on-signal-return.patch
new file mode 100644 (file)
index 0000000..718577e
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:15 +0200
+Subject: arm64: don't zero DIT on signal return
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-31-ard.biesheuvel@linaro.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 1265132127b63502d34e0f58c8bdef3a4dc927c2 ]
+
+Currently valid_user_regs() treats SPSR_ELx.DIT as a RES0 bit, causing
+it to be zeroed upon exception return, rather than preserved. Thus, code
+relying on DIT will not function as expected, and may expose an
+unexpected timing sidechannel.
+
+Let's remove DIT from the set of RES0 bits, such that it is preserved.
+At the same time, the related comment is updated to better describe the
+situation, and to take into account the most recent documentation of
+SPSR_ELx, in ARM DDI 0487C.a.
+
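+The effect of the mask change can be checked with a small standalone
+program (GENMASK_ULL reimplemented here for illustration):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define GENMASK_ULL(h, l) \
+          ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
+
+  int main(void)
+  {
+          uint64_t old_res0 = GENMASK_ULL(27, 22);
+          uint64_t new_res0 = GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22);
+
+          /* Bit 24 (DIT) was zeroed before and is preserved now. */
+          printf("DIT in old mask: %d, in new mask: %d\n",
+                 !!(old_res0 & (1ULL << 24)), !!(new_res0 & (1ULL << 24)));
+          return 0;
+  }
+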
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Fixes: 7206dc93a58fb764 ("arm64: Expose Arm v8.4 features")
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/ptrace.c |   12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -1402,15 +1402,19 @@ asmlinkage void syscall_trace_exit(struc
+ }
+ /*
+- * Bits which are always architecturally RES0 per ARM DDI 0487A.h
++ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
++ * We also take into account DIT (bit 24), which is not yet documented, and
++ * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
++ * allocated an EL0 meaning in future.
+  * Userspace cannot use these until they have an architectural meaning.
++ * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
+  * We also reserve IL for the kernel; SS is handled dynamically.
+  */
+ #define SPSR_EL1_AARCH64_RES0_BITS \
+-      (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
+-       GENMASK_ULL(5, 5))
++      (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
++       GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
+ #define SPSR_EL1_AARCH32_RES0_BITS \
+-      (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
++      (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
+ static int valid_compat_regs(struct user_pt_regs *regs)
+ {
diff --git a/queue-4.14/arm64-enable-generic-cpu-vulnerabilites-support.patch b/queue-4.14/arm64-enable-generic-cpu-vulnerabilites-support.patch
new file mode 100644 (file)
index 0000000..85354e2
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:23 +0200
+Subject: arm64: enable generic CPU vulnerabilites support
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Mian Yousaf Kaukab <ykaukab@suse.de>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-39-ard.biesheuvel@linaro.org>
+
+From: Mian Yousaf Kaukab <ykaukab@suse.de>
+
+[ Upstream commit 61ae1321f06c4489c724c803e9b8363dea576da3 ]
+
+Enable the CPU vulnerability show functions for spectre_v1, spectre_v2,
+meltdown and store-bypass.
+
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -49,6 +49,7 @@ config ARM64
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CLOCKEVENTS_BROADCAST
+       select GENERIC_CPU_AUTOPROBE
++      select GENERIC_CPU_VULNERABILITIES
+       select GENERIC_EARLY_IOREMAP
+       select GENERIC_IDLE_POLL_SETUP
+       select GENERIC_IRQ_PROBE
diff --git a/queue-4.14/arm64-expose-arm-v8.4-features.patch b/queue-4.14/arm64-expose-arm-v8.4-features.patch
new file mode 100644 (file)
index 0000000..cec46e5
--- /dev/null
@@ -0,0 +1,170 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:51 +0200
+Subject: arm64: Expose Arm v8.4 features
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>
+Message-ID: <20191024124833.4158-7-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 7206dc93a58fb76421c4411eefa3c003337bcb2d ]
+
+Expose the new features introduced by Arm v8.4 extensions to
+Arm v8-A profile.
+
+These include :
+
+ 1) Data independent timing of instructions (DIT, exposed as HWCAP_DIT).
+ 2) Unaligned atomic instructions and single-copy atomicity of loads
+    and stores (AT, exposed as HWCAP_USCAT).
+ 3) LDAPR and STLR instructions with immediate offsets (extension to
+    LRCPC, exposed as HWCAP_ILRCPC)
+ 4) Flag manipulation instructions (TS, exposed as HWCAP_FLAGM).
+
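+Userspace consumes these through AT_HWCAP; a minimal check (bit values
+copied from the hunk below, repeated here so the sample stands alone):
+
+  #include <stdio.h>
+  #include <sys/auxv.h>
+
+  #define HWCAP_DIT       (1 << 24)
+  #define HWCAP_FLAGM     (1 << 27)
+
+  int main(void)
+  {
+          unsigned long hwcap = getauxval(AT_HWCAP);
+
+          printf("dit: %d, flagm: %d\n",
+                 !!(hwcap & HWCAP_DIT), !!(hwcap & HWCAP_FLAGM));
+          return 0;
+  }
+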
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+[ardb: fix up context for missing SVE]
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/cpu-feature-registers.txt |   10 ++++++++++
+ arch/arm64/include/asm/sysreg.h               |    3 +++
+ arch/arm64/include/uapi/asm/hwcap.h           |    4 ++++
+ arch/arm64/kernel/cpufeature.c                |    7 +++++++
+ arch/arm64/kernel/cpuinfo.c                   |    4 ++++
+ 5 files changed, 28 insertions(+)
+
+--- a/Documentation/arm64/cpu-feature-registers.txt
++++ b/Documentation/arm64/cpu-feature-registers.txt
+@@ -110,6 +110,7 @@ infrastructure:
+      x--------------------------------------------------x
+      | Name                         |  bits   | visible |
+      |--------------------------------------------------|
++     | TS                           | [55-52] |    y    |
+      |--------------------------------------------------|
+      | FHM                          | [51-48] |    y    |
+      |--------------------------------------------------|
+@@ -139,6 +140,7 @@ infrastructure:
+      x--------------------------------------------------x
+      | Name                         |  bits   | visible |
+      |--------------------------------------------------|
++     | DIT                          | [51-48] |    y    |
+      |--------------------------------------------------|
+      | SVE                          | [35-32] |    y    |
+      |--------------------------------------------------|
+@@ -191,6 +193,14 @@ infrastructure:
+      | DPB                          | [3-0]   |    y    |
+      x--------------------------------------------------x
++  5) ID_AA64MMFR2_EL1 - Memory model feature register 2
++
++     x--------------------------------------------------x
++     | Name                         |  bits   | visible |
++     |--------------------------------------------------|
++     | AT                           | [35-32] |    y    |
++     x--------------------------------------------------x
++
+ Appendix I: Example
+ ---------------------------
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -375,6 +375,7 @@
+ #define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS   BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
+ /* id_aa64isar0 */
++#define ID_AA64ISAR0_TS_SHIFT         52
+ #define ID_AA64ISAR0_FHM_SHIFT                48
+ #define ID_AA64ISAR0_DP_SHIFT         44
+ #define ID_AA64ISAR0_SM4_SHIFT                40
+@@ -396,6 +397,7 @@
+ /* id_aa64pfr0 */
+ #define ID_AA64PFR0_CSV3_SHIFT                60
+ #define ID_AA64PFR0_CSV2_SHIFT                56
++#define ID_AA64PFR0_DIT_SHIFT         48
+ #define ID_AA64PFR0_GIC_SHIFT         24
+ #define ID_AA64PFR0_ASIMD_SHIFT               20
+ #define ID_AA64PFR0_FP_SHIFT          16
+@@ -441,6 +443,7 @@
+ #define ID_AA64MMFR1_VMIDBITS_16      2
+ /* id_aa64mmfr2 */
++#define ID_AA64MMFR2_AT_SHIFT         32
+ #define ID_AA64MMFR2_LVA_SHIFT                16
+ #define ID_AA64MMFR2_IESB_SHIFT               12
+ #define ID_AA64MMFR2_LSM_SHIFT                8
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -44,5 +44,9 @@
+ #define HWCAP_SHA512          (1 << 21)
+ #define HWCAP_SVE             (1 << 22)
+ #define HWCAP_ASIMDFHM                (1 << 23)
++#define HWCAP_DIT             (1 << 24)
++#define HWCAP_USCAT           (1 << 25)
++#define HWCAP_ILRCPC          (1 << 26)
++#define HWCAP_FLAGM           (1 << 27)
+ #endif /* _UAPI__ASM_HWCAP_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -107,6 +107,7 @@ cpufeature_pan_not_uao(const struct arm6
+  * sync with the documentation of the CPU feature register ABI.
+  */
+ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
+@@ -132,6 +133,7 @@ static const struct arm64_ftr_bits ftr_i
+ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+       S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+       S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+@@ -171,6 +173,7 @@ static const struct arm64_ftr_bits ftr_i
+ };
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
+@@ -1054,14 +1057,18 @@ static const struct arm64_cpu_capabiliti
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
++      HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
++      HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
+       HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
++      HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
++      HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
+       {},
+ };
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -76,6 +76,10 @@ static const char *const hwcap_str[] = {
+       "sha512",
+       "sve",
+       "asimdfhm",
++      "dit",
++      "uscat",
++      "ilrcpc",
++      "flagm",
+       NULL
+ };
diff --git a/queue-4.14/arm64-expose-support-for-optional-armv8-a-features.patch b/queue-4.14/arm64-expose-support-for-optional-armv8-a-features.patch
new file mode 100644 (file)
index 0000000..1f0e83b
--- /dev/null
@@ -0,0 +1,131 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:47 +0200
+Subject: arm64: Expose support for optional ARMv8-A features
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Dave Martin <dave.martin@arm.com>, Marc Zyngier <marc.zyngier@arm.com>
+Message-ID: <20191024124833.4158-3-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit f5e035f8694c3bdddc66ea46ecda965ee6853718 ]
+
+ARMv8-A adds a few optional features for ARMv8.2 and ARMv8.3.
+Expose them to userspace via HWCAPs and mrs emulation.
+
+SHA2-512  - Instruction support for the SHA512 hash algorithm (e.g. SHA512H,
+           SHA512H2, SHA512SU0, SHA512SU1)
+SHA3     - SHA3 crypto instructions (EOR3, RAX1, XAR, BCAX).
+SM3      - Instruction support for Chinese cryptography algorithm SM3
+SM4      - Instruction support for Chinese cryptography algorithm SM4
+DP       - Dot Product instructions (UDOT, SDOT).
+
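+As a minimal sketch of how userspace can consume these HWCAPs once
+exposed (HWCAP_ASIMDDP is used purely as an example bit; this snippet
+is illustrative and not part of the patch):
+
+  #include <stdio.h>
+  #include <sys/auxv.h>   /* getauxval() */
+  #include <asm/hwcap.h>  /* arm64 HWCAP_* bit definitions */
+
+  int main(void)
+  {
+          unsigned long hwcaps = getauxval(AT_HWCAP);
+
+          printf("asimddp: %s\n",
+                 (hwcaps & HWCAP_ASIMDDP) ? "present" : "absent");
+          return 0;
+  }
+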
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Dave Martin <dave.martin@arm.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/cpu-feature-registers.txt |   12 +++++++++++-
+ arch/arm64/include/asm/sysreg.h               |    4 ++++
+ arch/arm64/include/uapi/asm/hwcap.h           |    5 +++++
+ arch/arm64/kernel/cpufeature.c                |    9 +++++++++
+ arch/arm64/kernel/cpuinfo.c                   |    5 +++++
+ 5 files changed, 34 insertions(+), 1 deletion(-)
+
+--- a/Documentation/arm64/cpu-feature-registers.txt
++++ b/Documentation/arm64/cpu-feature-registers.txt
+@@ -110,10 +110,20 @@ infrastructure:
+      x--------------------------------------------------x
+      | Name                         |  bits   | visible |
+      |--------------------------------------------------|
+-     | RES0                         | [63-32] |    n    |
++     | RES0                         | [63-48] |    n    |
++     |--------------------------------------------------|
++     | DP                           | [47-44] |    y    |
++     |--------------------------------------------------|
++     | SM4                          | [43-40] |    y    |
++     |--------------------------------------------------|
++     | SM3                          | [39-36] |    y    |
++     |--------------------------------------------------|
++     | SHA3                         | [35-32] |    y    |
+      |--------------------------------------------------|
+      | RDM                          | [31-28] |    y    |
+      |--------------------------------------------------|
++     | RES0                         | [27-24] |    n    |
++     |--------------------------------------------------|
+      | ATOMICS                      | [23-20] |    y    |
+      |--------------------------------------------------|
+      | CRC32                        | [19-16] |    y    |
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -375,6 +375,10 @@
+ #define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS   BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
+ /* id_aa64isar0 */
++#define ID_AA64ISAR0_DP_SHIFT         44
++#define ID_AA64ISAR0_SM4_SHIFT                40
++#define ID_AA64ISAR0_SM3_SHIFT                36
++#define ID_AA64ISAR0_SHA3_SHIFT               32
+ #define ID_AA64ISAR0_RDM_SHIFT                28
+ #define ID_AA64ISAR0_ATOMICS_SHIFT    20
+ #define ID_AA64ISAR0_CRC32_SHIFT      16
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -37,5 +37,10 @@
+ #define HWCAP_FCMA            (1 << 14)
+ #define HWCAP_LRCPC           (1 << 15)
+ #define HWCAP_DCPOP           (1 << 16)
++#define HWCAP_SHA3            (1 << 17)
++#define HWCAP_SM3             (1 << 18)
++#define HWCAP_SM4             (1 << 19)
++#define HWCAP_ASIMDDP         (1 << 20)
++#define HWCAP_SHA512          (1 << 21)
+ #endif /* _UAPI__ASM_HWCAP_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -107,6 +107,10 @@ cpufeature_pan_not_uao(const struct arm6
+  * sync with the documentation of the CPU feature register ABI.
+  */
+ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_DP_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
+@@ -1040,9 +1044,14 @@ static const struct arm64_cpu_capabiliti
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
++      HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
++      HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
++      HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
++      HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
++      HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -69,6 +69,11 @@ static const char *const hwcap_str[] = {
+       "fcma",
+       "lrcpc",
+       "dcpop",
++      "sha3",
++      "sm3",
++      "sm4",
++      "asimddp",
++      "sha512",
+       NULL
+ };
diff --git a/queue-4.14/arm64-fix-ssbs-sanitization.patch b/queue-4.14/arm64-fix-ssbs-sanitization.patch
new file mode 100644 (file)
index 0000000..86abadb
--- /dev/null
@@ -0,0 +1,71 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:20 +0200
+Subject: arm64: fix SSBS sanitization
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-36-ard.biesheuvel@linaro.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit f54dada8274643e3ff4436df0ea124aeedc43cae ]
+
+In valid_user_regs() we treat SSBS as a RES0 bit, and consequently it is
+unexpectedly cleared when we restore a sigframe or fiddle with GPRs via
+ptrace.
+
+This patch fixes valid_user_regs() to account for this, updating the
+function to refer to the latest ARM ARM (ARM DDI 0487D.a). For AArch32
+tasks, SSBS appears in bit 23 of SPSR_EL1, matching its position in the
+AArch32-native PSR format, and we don't need to translate it as we have
+to for DIT.
+
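+A quick way to sanity-check the mask arithmetic in the hunk below
+(GENMASK_ULL is redefined locally here as an assumption, matching the
+kernel's semantics of setting bits h..l):
+
+  #include <assert.h>
+
+  #define GENMASK_ULL(h, l) \
+          ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
+
+  int main(void)
+  {
+          /* Dropping bit 12 (SSBS) from bits 20..10 leaves 20..13 and 11..10 */
+          assert((GENMASK_ULL(20, 10) & ~(1ULL << 12)) ==
+                 (GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10)));
+          return 0;
+  }
+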
+There are no other bit assignments that we need to account for today.
+As the recent documentation describes the DIT bit, we can drop our
+comment regarding DIT.
+
+While removing SSBS from the RES0 masks, existing inconsistent
+whitespace is corrected.
+
+Fixes: d71be2b6c0e19180 ("arm64: cpufeature: Detect SSBS and advertise to userspace")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/ptrace.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -1402,19 +1402,20 @@ asmlinkage void syscall_trace_exit(struc
+ }
+ /*
+- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
+- * We also take into account DIT (bit 24), which is not yet documented, and
+- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
+- * allocated an EL0 meaning in future.
++ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
++ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
++ * not described in ARM DDI 0487D.a.
++ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
++ * be allocated an EL0 meaning in future.
+  * Userspace cannot use these until they have an architectural meaning.
+  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
+  * We also reserve IL for the kernel; SS is handled dynamically.
+  */
+ #define SPSR_EL1_AARCH64_RES0_BITS \
+-      (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+-       GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
++      (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
++       GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
+ #define SPSR_EL1_AARCH32_RES0_BITS \
+-      (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
++      (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
+ static int valid_compat_regs(struct user_pt_regs *regs)
+ {
diff --git a/queue-4.14/arm64-fix-the-feature-type-for-id-register-fields.patch b/queue-4.14/arm64-fix-the-feature-type-for-id-register-fields.patch
new file mode 100644 (file)
index 0000000..5704643
--- /dev/null
@@ -0,0 +1,210 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:48 +0200
+Subject: arm64: Fix the feature type for ID register fields
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-4-ard.biesheuvel@linaro.org>
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+[ Upstream commit 5bdecb7971572a1aef828df507558e7a4dfe25ec ]
+
+Now that the ARM ARM clearly specifies the rules for inferring
+the values of the ID register fields, fix the types of the
+feature bits we have in the kernel.
+
+As per ARM ARM DDI0487B.b, section D10.1.4 "Principles of the
+ID scheme for fields in ID registers" lists the registers to
+which the scheme applies along with the exceptions.
+
+This patch changes the relevant feature bits from FTR_EXACT
+to FTR_LOWER_SAFE to select the safer value. This will enable
+an older kernel running on a new CPU to detect the safer option
+rather than completely disabling the feature.
+
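+As a minimal sketch of what the LOWER_SAFE policy means (an
+illustration of the idea, not the exact helper from cpufeature.c):
+
+  /* s64 as in the kernel; for a LOWER_SAFE field, the sanitised
+   * system-wide value is the smaller of the values any two CPUs
+   * report, i.e. the safer one. */
+  typedef long long s64;
+
+  static s64 ftr_lower_safe(s64 new, s64 cur)
+  {
+          return new < cur ? new : cur;
+  }
+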
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Dave Martin <dave.martin@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpufeature.c |  102 ++++++++++++++++++++---------------------
+ 1 file changed, 51 insertions(+), 51 deletions(-)
+
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -107,11 +107,11 @@ cpufeature_pan_not_uao(const struct arm6
+  * sync with the documentation of the CPU feature register ABI.
+  */
+ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_DP_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
+@@ -121,36 +121,36 @@ static const struct arm64_ftr_bits ftr_i
+ };
+ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
+       ARM64_FTR_END,
+ };
+ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+       S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+       S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+       /* Linux doesn't care about the EL3 */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+       ARM64_FTR_END,
+ };
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+-      S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+-      S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
++      S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
++      S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+       /* Linux shouldn't care about secure memory */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
+       /*
+        * Differing PARange is fine as long as all peripherals and memory are mapped
+        * within the minimum PARange of all CPUs
+@@ -161,20 +161,20 @@ static const struct arm64_ftr_bits ftr_i
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
+       ARM64_FTR_END,
+ };
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
+       ARM64_FTR_END,
+ };
+@@ -201,14 +201,14 @@ struct arm64_ftr_reg arm64_ftr_reg_ctrel
+ };
+ static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
+-      S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 28, 4, 0xf),        /* InnerShr */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 24, 4, 0),    /* FCSE */
++      S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),   /* InnerShr */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),       /* FCSE */
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),    /* AuxReg */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 16, 4, 0),    /* TCM */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0),    /* ShareLvl */
+-      S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0xf), /* OuterShr */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0),     /* PMSA */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0),     /* VMSA */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),       /* TCM */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),       /* ShareLvl */
++      S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),    /* OuterShr */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* PMSA */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),        /* VMSA */
+       ARM64_FTR_END,
+ };
+@@ -229,8 +229,8 @@ static const struct arm64_ftr_bits ftr_i
+ };
+ static const struct arm64_ftr_bits ftr_mvfr2[] = {
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0),             /* FPMisc */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0),             /* SIMDMisc */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* FPMisc */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* SIMDMisc */
+       ARM64_FTR_END,
+ };
+@@ -242,25 +242,25 @@ static const struct arm64_ftr_bits ftr_d
+ static const struct arm64_ftr_bits ftr_id_isar5[] = {
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
+       ARM64_FTR_END,
+ };
+ static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0),             /* ac2 */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* ac2 */
+       ARM64_FTR_END,
+ };
+ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0),    /* State3 */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0),             /* State2 */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0),             /* State1 */
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0),             /* State0 */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),               /* State3 */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),                /* State2 */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* State1 */
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* State0 */
+       ARM64_FTR_END,
+ };
diff --git a/queue-4.14/arm64-force-ssbs-on-context-switch.patch b/queue-4.14/arm64-force-ssbs-on-context-switch.patch
new file mode 100644 (file)
index 0000000..edfcb54
--- /dev/null
@@ -0,0 +1,118 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:31 +0200
+Subject: arm64: Force SSBS on context switch
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Marc Zyngier <marc.zyngier@arm.com>
+Message-ID: <20191024124833.4158-47-ard.biesheuvel@linaro.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit cbdf8a189a66001c36007bf0f5c975d0376c5c3a ]
+
+On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
+where only some of the CPUs implement SSBS, we end up losing track of
+the SSBS bit across task migration.
+
+To address this issue, let's force the SSBS bit on context switch.
+
+Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+[will: inverted logic and added comments]
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/processor.h |   14 ++++++++++++--
+ arch/arm64/kernel/process.c        |   29 ++++++++++++++++++++++++++++-
+ 2 files changed, 40 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -148,6 +148,16 @@ static inline void start_thread_common(s
+       regs->pc = pc;
+ }
++static inline void set_ssbs_bit(struct pt_regs *regs)
++{
++      regs->pstate |= PSR_SSBS_BIT;
++}
++
++static inline void set_compat_ssbs_bit(struct pt_regs *regs)
++{
++      regs->pstate |= PSR_AA32_SSBS_BIT;
++}
++
+ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
+                               unsigned long sp)
+ {
+@@ -155,7 +165,7 @@ static inline void start_thread(struct p
+       regs->pstate = PSR_MODE_EL0t;
+       if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+-              regs->pstate |= PSR_SSBS_BIT;
++              set_ssbs_bit(regs);
+       regs->sp = sp;
+ }
+@@ -174,7 +184,7 @@ static inline void compat_start_thread(s
+ #endif
+       if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+-              regs->pstate |= PSR_AA32_SSBS_BIT;
++              set_compat_ssbs_bit(regs);
+       regs->compat_sp = sp;
+ }
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -298,7 +298,7 @@ int copy_thread(unsigned long clone_flag
+                       childregs->pstate |= PSR_UAO_BIT;
+               if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+-                      childregs->pstate |= PSR_SSBS_BIT;
++                      set_ssbs_bit(childregs);
+               p->thread.cpu_context.x19 = stack_start;
+               p->thread.cpu_context.x20 = stk_sz;
+@@ -340,6 +340,32 @@ void uao_thread_switch(struct task_struc
+ }
+ /*
++ * Force SSBS state on context-switch, since it may be lost after migrating
++ * from a CPU which treats the bit as RES0 in a heterogeneous system.
++ */
++static void ssbs_thread_switch(struct task_struct *next)
++{
++      struct pt_regs *regs = task_pt_regs(next);
++
++      /*
++       * Nothing to do for kernel threads, but 'regs' may be junk
++       * (e.g. idle task) so check the flags and bail early.
++       */
++      if (unlikely(next->flags & PF_KTHREAD))
++              return;
++
++      /* If the mitigation is enabled, then we leave SSBS clear. */
++      if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
++          test_tsk_thread_flag(next, TIF_SSBD))
++              return;
++
++      if (compat_user_mode(regs))
++              set_compat_ssbs_bit(regs);
++      else if (user_mode(regs))
++              set_ssbs_bit(regs);
++}
++
++/*
+  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+  * shadow copy so that we can restore this upon entry from userspace.
+  *
+@@ -367,6 +393,7 @@ __notrace_funcgraph struct task_struct *
+       contextidr_thread_switch(next);
+       entry_task_switch(next);
+       uao_thread_switch(next);
++      ssbs_thread_switch(next);
+       /*
+        * Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/queue-4.14/arm64-get-rid-of-__smccc_workaround_1_hvc_.patch b/queue-4.14/arm64-get-rid-of-__smccc_workaround_1_hvc_.patch
new file mode 100644 (file)
index 0000000..4a9ee11
--- /dev/null
@@ -0,0 +1,86 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:16 +0200
+Subject: arm64: Get rid of __smccc_workaround_1_hvc_*
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Marc Zyngier <marc.zyngier@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-32-ard.biesheuvel@linaro.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit 22765f30dbaf1118c6ff0fcb8b99c9f2b4d396d5 ]
+
+The very existence of __smccc_workaround_1_hvc_* is a thinko, as
+KVM will never use a HVC call to perform the branch prediction
+invalidation. Even as a nested hypervisor, it would use an SMC
+instruction.
+
+Let's get rid of it.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/bpi.S        |   12 ++----------
+ arch/arm64/kernel/cpu_errata.c |    9 +++------
+ 2 files changed, 5 insertions(+), 16 deletions(-)
+
+--- a/arch/arm64/kernel/bpi.S
++++ b/arch/arm64/kernel/bpi.S
+@@ -56,21 +56,13 @@ ENTRY(__bp_harden_hyp_vecs_start)
+ ENTRY(__bp_harden_hyp_vecs_end)
+-.macro smccc_workaround_1 inst
++ENTRY(__smccc_workaround_1_smc_start)
+       sub     sp, sp, #(8 * 4)
+       stp     x2, x3, [sp, #(8 * 0)]
+       stp     x0, x1, [sp, #(8 * 2)]
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+-      \inst   #0
++      smc     #0
+       ldp     x2, x3, [sp, #(8 * 0)]
+       ldp     x0, x1, [sp, #(8 * 2)]
+       add     sp, sp, #(8 * 4)
+-.endm
+-
+-ENTRY(__smccc_workaround_1_smc_start)
+-      smccc_workaround_1      smc
+ ENTRY(__smccc_workaround_1_smc_end)
+-
+-ENTRY(__smccc_workaround_1_hvc_start)
+-      smccc_workaround_1      hvc
+-ENTRY(__smccc_workaround_1_hvc_end)
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -85,8 +85,6 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_har
+ #ifdef CONFIG_KVM
+ extern char __smccc_workaround_1_smc_start[];
+ extern char __smccc_workaround_1_smc_end[];
+-extern char __smccc_workaround_1_hvc_start[];
+-extern char __smccc_workaround_1_hvc_end[];
+ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+                               const char *hyp_vecs_end)
+@@ -131,8 +129,6 @@ static void __install_bp_hardening_cb(bp
+ #else
+ #define __smccc_workaround_1_smc_start                NULL
+ #define __smccc_workaround_1_smc_end          NULL
+-#define __smccc_workaround_1_hvc_start                NULL
+-#define __smccc_workaround_1_hvc_end          NULL
+ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                     const char *hyp_vecs_start,
+@@ -206,8 +202,9 @@ enable_smccc_arch_workaround_1(const str
+               if ((int)res.a0 < 0)
+                       return;
+               cb = call_hvc_arch_workaround_1;
+-              smccc_start = __smccc_workaround_1_hvc_start;
+-              smccc_end = __smccc_workaround_1_hvc_end;
++              /* This is a guest, no need to patch KVM vectors */
++              smccc_start = NULL;
++              smccc_end = NULL;
+               break;
+       case PSCI_CONDUIT_SMC:
diff --git a/queue-4.14/arm64-introduce-sysreg_clear_set.patch b/queue-4.14/arm64-introduce-sysreg_clear_set.patch
new file mode 100644 (file)
index 0000000..306c1f4
--- /dev/null
@@ -0,0 +1,60 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:54 +0200
+Subject: arm64: Introduce sysreg_clear_set()
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, Marc Zyngier <marc.zyngier@arm.com>
+Message-ID: <20191024124833.4158-10-ard.biesheuvel@linaro.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 6ebdf4db8fa564a150f46d32178af0873eb5abbb ]
+
+Currently we have a couple of helpers to manipulate bits in particular
+sysregs:
+
+ * config_sctlr_el1(u32 clear, u32 set)
+
+ * change_cpacr(u64 val, u64 mask)
+
+The parameters of these differ in naming convention, order, and size,
+which is unfortunate. They also differ slightly in behaviour, as
+change_cpacr() skips the sysreg write if the bits are unchanged, which
+is a useful optimization when sysreg writes are expensive.
+
+Before we gain yet another sysreg manipulation function, let's
+unify these with a common helper, providing a consistent order for
+clear/set operands, and the write skipping behaviour from
+change_cpacr(). Code will be migrated to the new helper in subsequent
+patches.
+
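+An illustrative call of the new helper (the bit names are examples,
+not taken from this patch):
+
+  /* Clear SCTLR_EL1.SED and set SCTLR_EL1.UCT; the register write is
+   * skipped when the resulting value would be unchanged. */
+  sysreg_clear_set(sctlr_el1, SCTLR_EL1_SED, SCTLR_EL1_UCT);
+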
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/sysreg.h |   11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -584,6 +584,17 @@ asm(
+       asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
+ } while (0)
++/*
++ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
++ * set mask are set. Other bits are left as-is.
++ */
++#define sysreg_clear_set(sysreg, clear, set) do {                     \
++      u64 __scs_val = read_sysreg(sysreg);                            \
++      u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);            \
++      if (__scs_new != __scs_val)                                     \
++              write_sysreg(__scs_new, sysreg);                        \
++} while (0)
++
+ static inline void config_sctlr_el1(u32 clear, u32 set)
+ {
+       u32 val;
diff --git a/queue-4.14/arm64-kvm-use-smccc_arch_workaround_1-for-falkor-bp-hardening.patch b/queue-4.14/arm64-kvm-use-smccc_arch_workaround_1-for-falkor-bp-hardening.patch
new file mode 100644 (file)
index 0000000..1dc887d
--- /dev/null
@@ -0,0 +1,221 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:14 +0200
+Subject: arm64: KVM: Use SMCCC_ARCH_WORKAROUND_1 for Falkor BP hardening
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Shanker Donthineni <shankerd@codeaurora.org>, Marc Zyngier <marc.zyngier@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-30-ard.biesheuvel@linaro.org>
+
+From: Shanker Donthineni <shankerd@codeaurora.org>
+
+[ Upstream commit 4bc352ffb39e4eec253e70f8c076f2f48a6c1926 ]
+
+The function SMCCC_ARCH_WORKAROUND_1 was introduced as part of the SMC
+V1.1 Calling Convention to mitigate CVE-2017-5715. This patch uses
+the standard call SMCCC_ARCH_WORKAROUND_1 for Falkor chips instead
+of Silicon provider service ID 0xC2001700.
+
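+For reference, the standard call used instead is the SMCCC 1.1 fast
+call, roughly (a sketch matching the pattern used in cpu_errata.c):
+
+  arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+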
+Cc: <stable@vger.kernel.org> # 4.14+
+Signed-off-by: Shanker Donthineni <shankerd@codeaurora.org>
+[maz: reworked errata framework integration]
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpucaps.h |    7 ++---
+ arch/arm64/include/asm/kvm_asm.h |    2 -
+ arch/arm64/kernel/bpi.S          |    7 -----
+ arch/arm64/kernel/cpu_errata.c   |   54 ++++++++++++---------------------------
+ arch/arm64/kvm/hyp/entry.S       |   12 --------
+ arch/arm64/kvm/hyp/switch.c      |   10 -------
+ 6 files changed, 20 insertions(+), 72 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -42,10 +42,9 @@
+ #define ARM64_HAS_DCPOP                               21
+ #define ARM64_UNMAP_KERNEL_AT_EL0             23
+ #define ARM64_HARDEN_BRANCH_PREDICTOR         24
+-#define ARM64_HARDEN_BP_POST_GUEST_EXIT               25
+-#define ARM64_SSBD                            26
+-#define ARM64_MISMATCHED_CACHE_TYPE           27
++#define ARM64_SSBD                            25
++#define ARM64_MISMATCHED_CACHE_TYPE           26
+-#define ARM64_NCAPS                           28
++#define ARM64_NCAPS                           27
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -70,8 +70,6 @@ extern u32 __kvm_get_mdcr_el2(void);
+ extern u32 __init_stage2_translation(void);
+-extern void __qcom_hyp_sanitize_btac_predictors(void);
+-
+ /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+ #define __hyp_this_cpu_ptr(sym)                                               \
+       ({                                                              \
+--- a/arch/arm64/kernel/bpi.S
++++ b/arch/arm64/kernel/bpi.S
+@@ -55,13 +55,6 @@ ENTRY(__bp_harden_hyp_vecs_start)
+       .endr
+ ENTRY(__bp_harden_hyp_vecs_end)
+-ENTRY(__qcom_hyp_sanitize_link_stack_start)
+-      stp     x29, x30, [sp, #-16]!
+-      .rept   16
+-      bl      . + 4
+-      .endr
+-      ldp     x29, x30, [sp], #16
+-ENTRY(__qcom_hyp_sanitize_link_stack_end)
+ .macro smccc_workaround_1 inst
+       sub     sp, sp, #(8 * 4)
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -83,8 +83,6 @@ cpu_enable_trap_ctr_access(const struct
+ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+ #ifdef CONFIG_KVM
+-extern char __qcom_hyp_sanitize_link_stack_start[];
+-extern char __qcom_hyp_sanitize_link_stack_end[];
+ extern char __smccc_workaround_1_smc_start[];
+ extern char __smccc_workaround_1_smc_end[];
+ extern char __smccc_workaround_1_hvc_start[];
+@@ -131,8 +129,6 @@ static void __install_bp_hardening_cb(bp
+       spin_unlock(&bp_lock);
+ }
+ #else
+-#define __qcom_hyp_sanitize_link_stack_start  NULL
+-#define __qcom_hyp_sanitize_link_stack_end    NULL
+ #define __smccc_workaround_1_smc_start                NULL
+ #define __smccc_workaround_1_smc_end          NULL
+ #define __smccc_workaround_1_hvc_start                NULL
+@@ -177,12 +173,25 @@ static void call_hvc_arch_workaround_1(v
+       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+ }
++static void qcom_link_stack_sanitization(void)
++{
++      u64 tmp;
++
++      asm volatile("mov       %0, x30         \n"
++                   ".rept     16              \n"
++                   "bl        . + 4           \n"
++                   ".endr                     \n"
++                   "mov       x30, %0         \n"
++                   : "=&r" (tmp));
++}
++
+ static void
+ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+ {
+       bp_hardening_cb_t cb;
+       void *smccc_start, *smccc_end;
+       struct arm_smccc_res res;
++      u32 midr = read_cpuid_id();
+       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+               return;
+@@ -215,30 +224,14 @@ enable_smccc_arch_workaround_1(const str
+               return;
+       }
++      if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
++          ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
++              cb = qcom_link_stack_sanitization;
++
+       install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+       return;
+ }
+-
+-static void qcom_link_stack_sanitization(void)
+-{
+-      u64 tmp;
+-
+-      asm volatile("mov       %0, x30         \n"
+-                   ".rept     16              \n"
+-                   "bl        . + 4           \n"
+-                   ".endr                     \n"
+-                   "mov       x30, %0         \n"
+-                   : "=&r" (tmp));
+-}
+-
+-static void
+-qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
+-{
+-      install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
+-                              __qcom_hyp_sanitize_link_stack_start,
+-                              __qcom_hyp_sanitize_link_stack_end);
+-}
+ #endif        /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+ #ifdef CONFIG_ARM64_SSBD
+@@ -463,10 +456,6 @@ static const struct midr_range arm64_bp_
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+       MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+       MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+-      {},
+-};
+-
+-static const struct midr_range qcom_bp_harden_cpus[] = {
+       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+       {},
+@@ -618,15 +607,6 @@ const struct arm64_cpu_capabilities arm6
+               ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+               .cpu_enable = enable_smccc_arch_workaround_1,
+       },
+-      {
+-              .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+-              ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
+-              .cpu_enable = qcom_enable_link_stack_sanitization,
+-      },
+-      {
+-              .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+-              ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
+-      },
+ #endif
+ #ifdef CONFIG_ARM64_SSBD
+       {
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -196,15 +196,3 @@ alternative_endif
+       eret
+ ENDPROC(__fpsimd_guest_restore)
+-
+-ENTRY(__qcom_hyp_sanitize_btac_predictors)
+-      /**
+-       * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
+-       * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
+-       * b15-b0: contains SiP functionID
+-       */
+-      movz    x0, #0x1700
+-      movk    x0, #0xc200, lsl #16
+-      smc     #0
+-      ret
+-ENDPROC(__qcom_hyp_sanitize_btac_predictors)
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -405,16 +405,6 @@ again:
+       __set_host_arch_workaround_state(vcpu);
+-      if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
+-              u32 midr = read_cpuid_id();
+-
+-              /* Apply BTAC predictors mitigation to all Falkor chips */
+-              if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+-                  ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
+-                      __qcom_hyp_sanitize_btac_predictors();
+-              }
+-      }
+-
+       fp_enabled = __fpsimd_enabled();
+       __sysreg_save_guest_state(guest_ctxt);
diff --git a/queue-4.14/arm64-move-sctlr_el-1-2-assertions-to-asm-sysreg.h.patch b/queue-4.14/arm64-move-sctlr_el-1-2-assertions-to-asm-sysreg.h.patch
new file mode 100644 (file)
index 0000000..29ac12d
--- /dev/null
@@ -0,0 +1,100 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:52 +0200
+Subject: arm64: move SCTLR_EL{1,2} assertions to <asm/sysreg.h>
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dave Martin <dave.martin@arm.com>, James Morse <james.morse@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-8-ard.biesheuvel@linaro.org>
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 1c312e84c2d71da4101754fa6118f703f7473e01 ]
+
+Currently we assert that the SCTLR_EL{1,2}_{SET,CLEAR} bits are
+self-consistent with an assertion in config_sctlr_el1(). This is a bit
+unusual, since config_sctlr_el1() doesn't make use of these definitions,
+and is far away from the definitions themselves.
+
+We can use the CPP #error directive to have equivalent assertions in
+<asm/sysreg.h>, next to the definitions of the set/clear bits, which is
+a bit clearer and simpler.
+
+At the same time, let's fill in the upper 32 bits for both registers in
+their respective RES0 definitions. This could be a little nicer with
+GENMASK_ULL(63, 32), but this currently lives in <linux/bitops.h>, which
+cannot safely be included from assembly, as <asm/sysreg.h> can.
+
+Note that when the preprocessor evaluates an expression for an #if
+directive, all signed or unsigned values are treated as intmax_t or
+uintmax_t respectively. To avoid ambiguity, we explicitly define
+the mask of all 64 bits.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Dave Martin <dave.martin@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/sysreg.h |   20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -315,7 +315,8 @@
+ #define SCTLR_EL2_RES0        ((1 << 6)  | (1 << 7)  | (1 << 8)  | (1 << 9)  | \
+                        (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
+                        (1 << 17) | (1 << 20) | (1 << 21) | (1 << 24) | \
+-                       (1 << 26) | (1 << 27) | (1 << 30) | (1 << 31))
++                       (1 << 26) | (1 << 27) | (1 << 30) | (1 << 31) | \
++                       (0xffffffffUL << 32))
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL2                SCTLR_ELx_EE
+@@ -331,9 +332,9 @@
+                        SCTLR_ELx_SA     | SCTLR_ELx_I    | SCTLR_ELx_WXN | \
+                        ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+-/* Check all the bits are accounted for */
+-#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS   BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
+-
++#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
++#error "Inconsistent SCTLR_EL2 set/clear bits"
++#endif
+ /* SCTLR_EL1 specific flags. */
+ #define SCTLR_EL1_UCI         (1 << 26)
+@@ -352,7 +353,8 @@
+ #define SCTLR_EL1_RES1        ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
+                        (1 << 29))
+ #define SCTLR_EL1_RES0  ((1 << 6)  | (1 << 10) | (1 << 13) | (1 << 17) | \
+-                       (1 << 21) | (1 << 27) | (1 << 30) | (1 << 31))
++                       (1 << 21) | (1 << 27) | (1 << 30) | (1 << 31) | \
++                       (0xffffffffUL << 32))
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL1                (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+@@ -371,8 +373,9 @@
+                        SCTLR_EL1_UMA | SCTLR_ELx_WXN     | ENDIAN_CLEAR_EL1 |\
+                        SCTLR_EL1_RES0)
+-/* Check all the bits are accounted for */
+-#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS   BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
++#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
++#error "Inconsistent SCTLR_EL1 set/clear bits"
++#endif
+ /* id_aa64isar0 */
+ #define ID_AA64ISAR0_TS_SHIFT         52
+@@ -585,9 +588,6 @@ static inline void config_sctlr_el1(u32
+ {
+       u32 val;
+-      SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
+-      SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
+-
+       val = read_sysreg(sctlr_el1);
+       val &= ~clear;
+       val |= set;
diff --git a/queue-4.14/arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch b/queue-4.14/arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch
new file mode 100644 (file)
index 0000000..17a5c3f
--- /dev/null
@@ -0,0 +1,76 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:25 +0200
+Subject: arm64: Provide a command line to disable spectre_v2 mitigation
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Jonathan Corbet <corbet@lwn.net>, linux-doc@vger.kernel.org, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-41-ard.biesheuvel@linaro.org>
+
+From: Jeremy Linton <jeremy.linton@arm.com>
+
+[ Upstream commit e5ce5e7267ddcbe13ab9ead2542524e1b7993e5a ]
+
+There are various reasons, such as benchmarking, to disable spectrev2
+mitigation on a machine. Provide a command-line option to do so.
+
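+With this applied, booting an arm64 kernel with:
+
+  nospectre_v2
+
+on the command line skips installation of the branch-predictor
+hardening callbacks, via the early_param() hook added below.
+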
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: linux-doc@vger.kernel.org
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt |    8 ++++----
+ arch/arm64/kernel/cpu_errata.c                  |   13 +++++++++++++
+ 2 files changed, 17 insertions(+), 4 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2745,10 +2745,10 @@
+                       (bounds check bypass). With this option data leaks
+                       are possible in the system.
+-      nospectre_v2    [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
+-                      (indirect branch prediction) vulnerability. System may
+-                      allow data leaks with this option, which is equivalent
+-                      to spectre_v2=off.
++      nospectre_v2    [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
++                      the Spectre variant 2 (indirect branch prediction)
++                      vulnerability. System may allow data leaks with this
++                      option.
+       nospec_store_bypass_disable
+                       [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -181,6 +181,14 @@ static void qcom_link_stack_sanitization
+                    : "=&r" (tmp));
+ }
++static bool __nospectre_v2;
++static int __init parse_nospectre_v2(char *str)
++{
++      __nospectre_v2 = true;
++      return 0;
++}
++early_param("nospectre_v2", parse_nospectre_v2);
++
+ static void
+ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+ {
+@@ -192,6 +200,11 @@ enable_smccc_arch_workaround_1(const str
+       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+               return;
++      if (__nospectre_v2) {
++              pr_info_once("spectrev2 mitigation disabled by command line option\n");
++              return;
++      }
++
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+               return;
diff --git a/queue-4.14/arm64-speculation-support-mitigations-cmdline-option.patch b/queue-4.14/arm64-speculation-support-mitigations-cmdline-option.patch
new file mode 100644 (file)
index 0000000..b7fb62d
--- /dev/null
@@ -0,0 +1,118 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:33 +0200
+Subject: arm64/speculation: Support 'mitigations=' cmdline option
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Josh Poimboeuf <jpoimboe@redhat.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-49-ard.biesheuvel@linaro.org>
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+[ Upstream commit a111b7c0f20e13b54df2fa959b3dc0bdf1925ae6 ]
+
+Configure arm64 runtime CPU speculation bug mitigations in accordance
+with the 'mitigations=' cmdline option.  This affects Meltdown, Spectre
+v2, and Speculative Store Bypass.
+
+The default behavior is unchanged.
+
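+For example, booting an arm64 kernel with:
+
+  mitigations=off
+
+is now equivalent to passing kpti=0 nospectre_v2 ssbd=force-off, as
+the documentation hunk below spells out.
+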
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+[will: reorder checks so KASLR implies KPTI and SSBS is affected by cmdline]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt |    8 +++++---
+ arch/arm64/kernel/cpu_errata.c                  |    6 +++++-
+ arch/arm64/kernel/cpufeature.c                  |    8 +++++++-
+ 3 files changed, 17 insertions(+), 5 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2389,8 +2389,8 @@
+                       http://repo.or.cz/w/linux-2.6/mini2440.git
+       mitigations=
+-                      [X86,PPC,S390] Control optional mitigations for CPU
+-                      vulnerabilities.  This is a set of curated,
++                      [X86,PPC,S390,ARM64] Control optional mitigations for
++                      CPU vulnerabilities.  This is a set of curated,
+                       arch-independent options, each of which is an
+                       aggregation of existing arch-specific options.
+@@ -2399,12 +2399,14 @@
+                               improves system performance, but it may also
+                               expose users to several CPU vulnerabilities.
+                               Equivalent to: nopti [X86,PPC]
++                                             kpti=0 [ARM64]
+                                              nospectre_v1 [PPC]
+                                              nobp=0 [S390]
+                                              nospectre_v1 [X86]
+-                                             nospectre_v2 [X86,PPC,S390]
++                                             nospectre_v2 [X86,PPC,S390,ARM64]
+                                              spectre_v2_user=off [X86]
+                                              spec_store_bypass_disable=off [X86,PPC]
++                                             ssbd=force-off [ARM64]
+                                              l1tf=off [X86]
+                                              mds=off [X86]
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -19,6 +19,7 @@
+ #include <linux/arm-smccc.h>
+ #include <linux/psci.h>
+ #include <linux/types.h>
++#include <linux/cpu.h>
+ #include <asm/cpu.h>
+ #include <asm/cputype.h>
+ #include <asm/cpufeature.h>
+@@ -347,6 +348,9 @@ static bool has_ssbd_mitigation(const st
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++      if (cpu_mitigations_off())
++              ssbd_state = ARM64_SSBD_FORCE_DISABLE;
++
+       /* delay setting __ssb_safe until we get a firmware response */
+       if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+               this_cpu_safe = true;
+@@ -544,7 +548,7 @@ check_branch_predictor(const struct arm6
+       }
+       /* forced off */
+-      if (__nospectre_v2) {
++      if (__nospectre_v2 || cpu_mitigations_off()) {
+               pr_info_once("spectrev2 mitigation disabled by command line option\n");
+               __hardenbp_enab = false;
+               return false;
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -24,6 +24,7 @@
+ #include <linux/stop_machine.h>
+ #include <linux/types.h>
+ #include <linux/mm.h>
++#include <linux/cpu.h>
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+ #include <asm/cpu_ops.h>
+@@ -841,7 +842,7 @@ static bool unmap_kernel_at_el0(const st
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+       };
+-      char const *str = "command line option";
++      char const *str = "kpti command line option";
+       bool meltdown_safe;
+       meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+@@ -871,6 +872,11 @@ static bool unmap_kernel_at_el0(const st
+               }
+       }
++      if (cpu_mitigations_off() && !__kpti_forced) {
++              str = "mitigations=off";
++              __kpti_forced = -1;
++      }
++
+       if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+               pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+               return false;
diff --git a/queue-4.14/arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch b/queue-4.14/arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch
new file mode 100644 (file)
index 0000000..4bef7e4
--- /dev/null
@@ -0,0 +1,299 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:18 +0200
+Subject: arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-34-ard.biesheuvel@linaro.org>
+
+From: Will Deacon <will.deacon@arm.com>
+
+[ Upstream commit 8f04e8e6e29c93421a95b61cad62e3918425eac7 ]
+
+On CPUs with support for PSTATE.SSBS, the kernel can toggle the SSBD
+state without needing to call into firmware.
+
+This patch hooks into the existing SSBD infrastructure so that SSBS is
+used on CPUs that support it, but it's all made horribly complicated by
+the very real possibility of big/little systems that don't uniformly
+provide the new capability.
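
The core of the change is a cheap local fallback: if this CPU
implements PSTATE.SSBS, flip the bit with an MSR instead of trapping to
firmware. A self-contained sketch of that ordering (illustrative names;
the real code is arm64_set_ssbd_mitigation() below). Note the
inversion: SSBS set means speculative store bypass is permitted, i.e.
the mitigation is off.

    #include <stdbool.h>
    #include <stdio.h>

    static bool cpu_has_ssbs;  /* would come from ID_AA64PFR1_EL1.SSBS */

    static void firmware_set_ssbd(bool state)
    {
        printf("SMCCC ARCH_WORKAROUND_2(%d) via firmware\n", state);
    }

    static void set_ssbd_mitigation(bool state)
    {
        if (cpu_has_ssbs) {
            /* kernel equivalent: asm volatile(SET_PSTATE_SSBS(!state)) */
            printf("MSR SSBS, %d (no firmware trap)\n", !state);
            return;
        }
        firmware_set_ssbd(state);
    }

    int main(void)
    {
        cpu_has_ssbs = true;
        set_ssbd_mitigation(true);   /* mitigation on: SSBS cleared */
        return 0;
    }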
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ardb: add #include of asm/compat.h]
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/processor.h   |    7 +++++
+ arch/arm64/include/asm/ptrace.h      |    1 
+ arch/arm64/include/asm/sysreg.h      |    3 ++
+ arch/arm64/include/uapi/asm/ptrace.h |    1 
+ arch/arm64/kernel/cpu_errata.c       |   26 ++++++++++++++++++--
+ arch/arm64/kernel/cpufeature.c       |   45 +++++++++++++++++++++++++++++++++++
+ arch/arm64/kernel/process.c          |    4 +++
+ arch/arm64/kernel/ssbd.c             |   22 +++++++++++++++++
+ 8 files changed, 107 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -153,6 +153,10 @@ static inline void start_thread(struct p
+ {
+       start_thread_common(regs, pc);
+       regs->pstate = PSR_MODE_EL0t;
++
++      if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
++              regs->pstate |= PSR_SSBS_BIT;
++
+       regs->sp = sp;
+ }
+@@ -169,6 +173,9 @@ static inline void compat_start_thread(s
+       regs->pstate |= COMPAT_PSR_E_BIT;
+ #endif
++      if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
++              regs->pstate |= PSR_AA32_SSBS_BIT;
++
+       regs->compat_sp = sp;
+ }
+ #endif
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -50,6 +50,7 @@
+ #define PSR_AA32_I_BIT                0x00000080
+ #define PSR_AA32_A_BIT                0x00000100
+ #define PSR_AA32_E_BIT                0x00000200
++#define PSR_AA32_SSBS_BIT     0x00800000
+ #define PSR_AA32_DIT_BIT      0x01000000
+ #define PSR_AA32_Q_BIT                0x08000000
+ #define PSR_AA32_V_BIT                0x10000000
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -86,11 +86,14 @@
+ #define REG_PSTATE_PAN_IMM            sys_reg(0, 0, 4, 0, 4)
+ #define REG_PSTATE_UAO_IMM            sys_reg(0, 0, 4, 0, 3)
++#define REG_PSTATE_SSBS_IMM           sys_reg(0, 3, 4, 0, 1)
+ #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM |       \
+                                     (!!x)<<8 | 0x1f)
+ #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM |       \
+                                     (!!x)<<8 | 0x1f)
++#define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \
++                                     (!!x)<<8 | 0x1f)
+ #define SYS_DC_ISW                    sys_insn(1, 0, 7, 6, 2)
+ #define SYS_DC_CSW                    sys_insn(1, 0, 7, 10, 2)
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -45,6 +45,7 @@
+ #define PSR_I_BIT     0x00000080
+ #define PSR_A_BIT     0x00000100
+ #define PSR_D_BIT     0x00000200
++#define PSR_SSBS_BIT  0x00001000
+ #define PSR_PAN_BIT   0x00400000
+ #define PSR_UAO_BIT   0x00800000
+ #define PSR_Q_BIT     0x08000000
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -304,6 +304,14 @@ void __init arm64_enable_wa2_handling(st
+ void arm64_set_ssbd_mitigation(bool state)
+ {
++      if (this_cpu_has_cap(ARM64_SSBS)) {
++              if (state)
++                      asm volatile(SET_PSTATE_SSBS(0));
++              else
++                      asm volatile(SET_PSTATE_SSBS(1));
++              return;
++      }
++
+       switch (psci_ops.conduit) {
+       case PSCI_CONDUIT_HVC:
+               arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
+@@ -328,6 +336,11 @@ static bool has_ssbd_mitigation(const st
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++      if (this_cpu_has_cap(ARM64_SSBS)) {
++              required = false;
++              goto out_printmsg;
++      }
++
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+               ssbd_state = ARM64_SSBD_UNKNOWN;
+               return false;
+@@ -376,7 +389,6 @@ static bool has_ssbd_mitigation(const st
+       switch (ssbd_state) {
+       case ARM64_SSBD_FORCE_DISABLE:
+-              pr_info_once("%s disabled from command-line\n", entry->desc);
+               arm64_set_ssbd_mitigation(false);
+               required = false;
+               break;
+@@ -389,7 +401,6 @@ static bool has_ssbd_mitigation(const st
+               break;
+       case ARM64_SSBD_FORCE_ENABLE:
+-              pr_info_once("%s forced from command-line\n", entry->desc);
+               arm64_set_ssbd_mitigation(true);
+               required = true;
+               break;
+@@ -399,6 +410,17 @@ static bool has_ssbd_mitigation(const st
+               break;
+       }
++out_printmsg:
++      switch (ssbd_state) {
++      case ARM64_SSBD_FORCE_DISABLE:
++              pr_info_once("%s disabled from command-line\n", entry->desc);
++              break;
++
++      case ARM64_SSBD_FORCE_ENABLE:
++              pr_info_once("%s forced from command-line\n", entry->desc);
++              break;
++      }
++
+       return required;
+ }
+ #endif        /* CONFIG_ARM64_SSBD */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -925,6 +925,48 @@ static void cpu_copy_el2regs(const struc
+               write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+ }
++#ifdef CONFIG_ARM64_SSBD
++static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
++{
++      if (user_mode(regs))
++              return 1;
++
++      if (instr & BIT(CRm_shift))
++              regs->pstate |= PSR_SSBS_BIT;
++      else
++              regs->pstate &= ~PSR_SSBS_BIT;
++
++      arm64_skip_faulting_instruction(regs, 4);
++      return 0;
++}
++
++static struct undef_hook ssbs_emulation_hook = {
++      .instr_mask     = ~(1U << CRm_shift),
++      .instr_val      = 0xd500001f | REG_PSTATE_SSBS_IMM,
++      .fn             = ssbs_emulation_handler,
++};
++
++static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
++{
++      static bool undef_hook_registered = false;
++      static DEFINE_SPINLOCK(hook_lock);
++
++      spin_lock(&hook_lock);
++      if (!undef_hook_registered) {
++              register_undef_hook(&ssbs_emulation_hook);
++              undef_hook_registered = true;
++      }
++      spin_unlock(&hook_lock);
++
++      if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
++              sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
++              arm64_set_ssbd_mitigation(false);
++      } else {
++              arm64_set_ssbd_mitigation(true);
++      }
++}
++#endif /* CONFIG_ARM64_SSBD */
++
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+       {
+               .desc = "GIC system register CPU interface",
+@@ -1049,6 +1091,7 @@ static const struct arm64_cpu_capabiliti
+               .min_field_value = 1,
+       },
+ #endif
++#ifdef CONFIG_ARM64_SSBD
+       {
+               .desc = "Speculative Store Bypassing Safe (SSBS)",
+               .capability = ARM64_SSBS,
+@@ -1058,7 +1101,9 @@ static const struct arm64_cpu_capabiliti
+               .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+               .sign = FTR_UNSIGNED,
+               .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
++              .cpu_enable = cpu_enable_ssbs,
+       },
++#endif
+       {},
+ };
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -296,6 +296,10 @@ int copy_thread(unsigned long clone_flag
+               if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+                   cpus_have_const_cap(ARM64_HAS_UAO))
+                       childregs->pstate |= PSR_UAO_BIT;
++
++              if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
++                      childregs->pstate |= PSR_SSBS_BIT;
++
+               p->thread.cpu_context.x19 = stack_start;
+               p->thread.cpu_context.x20 = stk_sz;
+       }
+--- a/arch/arm64/kernel/ssbd.c
++++ b/arch/arm64/kernel/ssbd.c
+@@ -3,13 +3,32 @@
+  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+  */
++#include <linux/compat.h>
+ #include <linux/errno.h>
+ #include <linux/prctl.h>
+ #include <linux/sched.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/thread_info.h>
++#include <asm/compat.h>
+ #include <asm/cpufeature.h>
++static void ssbd_ssbs_enable(struct task_struct *task)
++{
++      u64 val = is_compat_thread(task_thread_info(task)) ?
++                PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
++
++      task_pt_regs(task)->pstate |= val;
++}
++
++static void ssbd_ssbs_disable(struct task_struct *task)
++{
++      u64 val = is_compat_thread(task_thread_info(task)) ?
++                PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
++
++      task_pt_regs(task)->pstate &= ~val;
++}
++
+ /*
+  * prctl interface for SSBD
+  */
+@@ -45,12 +64,14 @@ static int ssbd_prctl_set(struct task_st
+                       return -EPERM;
+               task_clear_spec_ssb_disable(task);
+               clear_tsk_thread_flag(task, TIF_SSBD);
++              ssbd_ssbs_enable(task);
+               break;
+       case PR_SPEC_DISABLE:
+               if (state == ARM64_SSBD_FORCE_DISABLE)
+                       return -EPERM;
+               task_set_spec_ssb_disable(task);
+               set_tsk_thread_flag(task, TIF_SSBD);
++              ssbd_ssbs_disable(task);
+               break;
+       case PR_SPEC_FORCE_DISABLE:
+               if (state == ARM64_SSBD_FORCE_DISABLE)
+@@ -58,6 +79,7 @@ static int ssbd_prctl_set(struct task_st
+               task_set_spec_ssb_disable(task);
+               task_set_spec_ssb_force_disable(task);
+               set_tsk_thread_flag(task, TIF_SSBD);
++              ssbd_ssbs_disable(task);
+               break;
+       default:
+               return -ERANGE;
diff --git a/queue-4.14/arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch b/queue-4.14/arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch
new file mode 100644 (file)
index 0000000..4055d9d
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:30 +0200
+Subject: arm64: ssbs: Don't treat CPUs with SSBS as unaffected by SSB
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-46-ard.biesheuvel@linaro.org>
+
+From: Will Deacon <will.deacon@arm.com>
+
+[ Upstream commit eb337cdfcd5dd3b10522c2f34140a73a4c285c30 ]
+
+SSBS provides a relatively cheap mitigation for SSB, but it is still a
+mitigation and its presence does not indicate that the CPU is unaffected
+by the vulnerability.
+
+Tweak the mitigation logic so that we report the correct string in sysfs.
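
In other words, SSBS now counts as a mitigation rather than immunity:
only the MIDR safe list may claim "Not affected". A toy model of the
resulting reporting order (the status strings here are illustrative,
not the exact sysfs output):

    #include <stdbool.h>
    #include <stdio.h>

    static const char *ssb_status(bool on_safe_list, bool has_ssbs)
    {
        if (on_safe_list)
            return "Not affected";
        if (has_ssbs)
            return "Mitigation: Speculative Store Bypass disabled";
        return "Vulnerable";
    }

    int main(void)
    {
        /* SSBS alone no longer reports "Not affected". */
        printf("%s\n", ssb_status(false, true));
        return 0;
    }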
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -333,15 +333,17 @@ static bool has_ssbd_mitigation(const st
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++      /* delay setting __ssb_safe until we get a firmware response */
++      if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
++              this_cpu_safe = true;
++
+       if (this_cpu_has_cap(ARM64_SSBS)) {
++              if (!this_cpu_safe)
++                      __ssb_safe = false;
+               required = false;
+               goto out_printmsg;
+       }
+-      /* delay setting __ssb_safe until we get a firmware response */
+-      if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+-              this_cpu_safe = true;
+-
+       if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+               ssbd_state = ARM64_SSBD_UNKNOWN;
+               if (!this_cpu_safe)
diff --git a/queue-4.14/arm64-sysreg-move-to-use-definitions-for-all-the-sctlr-bits.patch b/queue-4.14/arm64-sysreg-move-to-use-definitions-for-all-the-sctlr-bits.patch
new file mode 100644 (file)
index 0000000..d5e2aff
--- /dev/null
@@ -0,0 +1,220 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:46 +0200
+Subject: arm64: sysreg: Move to use definitions for all the SCTLR bits
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, James Morse <james.morse@arm.com>
+Message-ID: <20191024124833.4158-2-ard.biesheuvel@linaro.org>
+
+From: James Morse <james.morse@arm.com>
+
+[ Upstream commit 7a00d68ebe5f07cb1db17e7fedfd031f0d87e8bb ]
+
+__cpu_setup() configures SCTLR_EL1 using some hard-coded hex masks,
+and el2_setup() duplicates some of this when setting RES1 bits.
+
+Let's make this the same as KVM's hyp_init, which uses named bits.
+
+First, we add definitions for all the SCTLR_EL{1,2} bits, the RES{1,0}
+bits, and those we want to set or clear.
+
+Add build_bug checks to ensure all bits are either set or clear.
+This means we don't need to preserve endian-ness configuration
+generated elsewhere.
+
+Finally, move the head.S and proc.S users of these hard-coded masks
+over to the macro versions.
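
The completeness check is the interesting trick: XORing the SET and
CLEAR masks must light up every bit of the register, proving no bit was
left implicitly at its reset value. A standalone C11 sketch of the same
idea, with only two example bits spelled out:

    #include <stdint.h>

    #define BIT_M (1u << 0)
    #define BIT_A (1u << 1)

    /* Everything not explicitly set must be explicitly cleared. */
    #define CTL_SET   (BIT_M | BIT_A)
    #define CTL_CLEAR (~(uint32_t)(BIT_M | BIT_A))

    /* Mirrors SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS: fails to compile if
     * any bit appears in neither (or both) of the two masks. */
    _Static_assert((CTL_SET ^ CTL_CLEAR) == ~(uint32_t)0,
                   "every bit must be explicitly set or cleared");

    int main(void) { return 0; }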
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/sysreg.h |   65 ++++++++++++++++++++++++++++++++++++++--
+ arch/arm64/kernel/head.S        |   13 +-------
+ arch/arm64/mm/proc.S            |   24 --------------
+ 3 files changed, 67 insertions(+), 35 deletions(-)
+
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -20,6 +20,7 @@
+ #ifndef __ASM_SYSREG_H
+ #define __ASM_SYSREG_H
++#include <asm/compiler.h>
+ #include <linux/stringify.h>
+ /*
+@@ -297,25 +298,81 @@
+ /* Common SCTLR_ELx flags. */
+ #define SCTLR_ELx_EE    (1 << 25)
++#define SCTLR_ELx_WXN (1 << 19)
+ #define SCTLR_ELx_I   (1 << 12)
+ #define SCTLR_ELx_SA  (1 << 3)
+ #define SCTLR_ELx_C   (1 << 2)
+ #define SCTLR_ELx_A   (1 << 1)
+ #define SCTLR_ELx_M   1
++#define SCTLR_ELx_FLAGS       (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
++                       SCTLR_ELx_SA | SCTLR_ELx_I)
++
++/* SCTLR_EL2 specific flags. */
+ #define SCTLR_EL2_RES1        ((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+                        (1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
+                        (1 << 29))
++#define SCTLR_EL2_RES0        ((1 << 6)  | (1 << 7)  | (1 << 8)  | (1 << 9)  | \
++                       (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
++                       (1 << 17) | (1 << 20) | (1 << 21) | (1 << 24) | \
++                       (1 << 26) | (1 << 27) | (1 << 30) | (1 << 31))
++
++#ifdef CONFIG_CPU_BIG_ENDIAN
++#define ENDIAN_SET_EL2                SCTLR_ELx_EE
++#define ENDIAN_CLEAR_EL2      0
++#else
++#define ENDIAN_SET_EL2                0
++#define ENDIAN_CLEAR_EL2      SCTLR_ELx_EE
++#endif
++
++/* SCTLR_EL2 value used for the hyp-stub */
++#define SCTLR_EL2_SET (ENDIAN_SET_EL2   | SCTLR_EL2_RES1)
++#define SCTLR_EL2_CLEAR       (SCTLR_ELx_M      | SCTLR_ELx_A    | SCTLR_ELx_C   | \
++                       SCTLR_ELx_SA     | SCTLR_ELx_I    | SCTLR_ELx_WXN | \
++                       ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
++
++/* Check all the bits are accounted for */
++#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS   BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
+-#define SCTLR_ELx_FLAGS       (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+-                       SCTLR_ELx_SA | SCTLR_ELx_I)
+ /* SCTLR_EL1 specific flags. */
+ #define SCTLR_EL1_UCI         (1 << 26)
++#define SCTLR_EL1_E0E         (1 << 24)
+ #define SCTLR_EL1_SPAN                (1 << 23)
++#define SCTLR_EL1_NTWE                (1 << 18)
++#define SCTLR_EL1_NTWI                (1 << 16)
+ #define SCTLR_EL1_UCT         (1 << 15)
++#define SCTLR_EL1_DZE         (1 << 14)
++#define SCTLR_EL1_UMA         (1 << 9)
+ #define SCTLR_EL1_SED         (1 << 8)
++#define SCTLR_EL1_ITD         (1 << 7)
+ #define SCTLR_EL1_CP15BEN     (1 << 5)
++#define SCTLR_EL1_SA0         (1 << 4)
++
++#define SCTLR_EL1_RES1        ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
++                       (1 << 29))
++#define SCTLR_EL1_RES0  ((1 << 6)  | (1 << 10) | (1 << 13) | (1 << 17) | \
++                       (1 << 21) | (1 << 27) | (1 << 30) | (1 << 31))
++
++#ifdef CONFIG_CPU_BIG_ENDIAN
++#define ENDIAN_SET_EL1                (SCTLR_EL1_E0E | SCTLR_ELx_EE)
++#define ENDIAN_CLEAR_EL1      0
++#else
++#define ENDIAN_SET_EL1                0
++#define ENDIAN_CLEAR_EL1      (SCTLR_EL1_E0E | SCTLR_ELx_EE)
++#endif
++
++#define SCTLR_EL1_SET (SCTLR_ELx_M    | SCTLR_ELx_C    | SCTLR_ELx_SA   |\
++                       SCTLR_EL1_SA0  | SCTLR_EL1_SED  | SCTLR_ELx_I    |\
++                       SCTLR_EL1_DZE  | SCTLR_EL1_UCT  | SCTLR_EL1_NTWI |\
++                       SCTLR_EL1_NTWE | SCTLR_EL1_SPAN | ENDIAN_SET_EL1 |\
++                       SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
++#define SCTLR_EL1_CLEAR       (SCTLR_ELx_A   | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD    |\
++                       SCTLR_EL1_UMA | SCTLR_ELx_WXN     | ENDIAN_CLEAR_EL1 |\
++                       SCTLR_EL1_RES0)
++
++/* Check all the bits are accounted for */
++#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS   BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
+ /* id_aa64isar0 */
+ #define ID_AA64ISAR0_RDM_SHIFT                28
+@@ -463,6 +520,7 @@
+ #else
++#include <linux/build_bug.h>
+ #include <linux/types.h>
+ asm(
+@@ -519,6 +577,9 @@ static inline void config_sctlr_el1(u32
+ {
+       u32 val;
++      SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
++      SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
++
+       val = read_sysreg(sctlr_el1);
+       val &= ~clear;
+       val |= set;
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -388,17 +388,13 @@ ENTRY(el2_setup)
+       mrs     x0, CurrentEL
+       cmp     x0, #CurrentEL_EL2
+       b.eq    1f
+-      mrs     x0, sctlr_el1
+-CPU_BE(       orr     x0, x0, #(3 << 24)      )       // Set the EE and E0E bits for EL1
+-CPU_LE(       bic     x0, x0, #(3 << 24)      )       // Clear the EE and E0E bits for EL1
++      mov_q   x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
+       msr     sctlr_el1, x0
+       mov     w0, #BOOT_CPU_MODE_EL1          // This cpu booted in EL1
+       isb
+       ret
+-1:    mrs     x0, sctlr_el2
+-CPU_BE(       orr     x0, x0, #(1 << 25)      )       // Set the EE bit for EL2
+-CPU_LE(       bic     x0, x0, #(1 << 25)      )       // Clear the EE bit for EL2
++1:    mov_q   x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
+       msr     sctlr_el2, x0
+ #ifdef CONFIG_ARM64_VHE
+@@ -505,10 +501,7 @@ install_el2_stub:
+        * requires no configuration, and all non-hyp-specific EL2 setup
+        * will be done via the _EL1 system register aliases in __cpu_setup.
+        */
+-      /* sctlr_el1 */
+-      mov     x0, #0x0800                     // Set/clear RES{1,0} bits
+-CPU_BE(       movk    x0, #0x33d0, lsl #16    )       // Set EE and E0E on BE systems
+-CPU_LE(       movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
++      mov_q   x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
+       msr     sctlr_el1, x0
+       /* Coprocessor traps. */
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -430,11 +430,7 @@ ENTRY(__cpu_setup)
+       /*
+        * Prepare SCTLR
+        */
+-      adr     x5, crval
+-      ldp     w5, w6, [x5]
+-      mrs     x0, sctlr_el1
+-      bic     x0, x0, x5                      // clear bits
+-      orr     x0, x0, x6                      // set bits
++      mov_q   x0, SCTLR_EL1_SET
+       /*
+        * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
+        * both user and kernel.
+@@ -470,21 +466,3 @@ ENTRY(__cpu_setup)
+       msr     tcr_el1, x10
+       ret                                     // return to head.S
+ ENDPROC(__cpu_setup)
+-
+-      /*
+-       * We set the desired value explicitly, including those of the
+-       * reserved bits. The values of bits EE & E0E were set early in
+-       * el2_setup, which are left untouched below.
+-       *
+-       *                 n n            T
+-       *       U E      WT T UD     US IHBS
+-       *       CE0      XWHW CZ     ME TEEA S
+-       * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
+-       * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved
+-       * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings
+-       */
+-      .type   crval, #object
+-crval:
+-      .word   0xfcffffff                      // clear
+-      .word   0x34d5d91d                      // set
+-      .popsection
diff --git a/queue-4.14/arm64-use-firmware-to-detect-cpus-that-are-not-affected-by-spectre-v2.patch b/queue-4.14/arm64-use-firmware-to-detect-cpus-that-are-not-affected-by-spectre-v2.patch
new file mode 100644 (file)
index 0000000..79e4d7c
--- /dev/null
@@ -0,0 +1,79 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:32 +0200
+Subject: arm64: Use firmware to detect CPUs that are not affected by Spectre-v2
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Marc Zyngier <marc.zyngier@arm.com>, Stefan Wahren <stefan.wahren@i2se.com>, Will Deacon <will.deacon@arm.com>
+Message-ID: <20191024124833.4158-48-ard.biesheuvel@linaro.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit 517953c2c47f9c00a002f588ac856a5bc70cede3 ]
+
+The SMCCC ARCH_WORKAROUND_1 service can indicate that although the
+firmware knows about the Spectre-v2 mitigation, this particular
+CPU is not vulnerable, and it is thus not necessary to call
+the firmware on this CPU.
+
+Let's use this information to our benefit.
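
The key change is that a positive SMCCC return now carries meaning: 1
says the firmware implements the workaround but this particular CPU
does not need it. A hedged model of the three-way classification (enum
names invented; the real logic is the switch in detect_harden_bp_fw()
below):

    #include <stdio.h>

    enum bp_hardening {
        BP_UNSUPPORTED  = -1,  /* any negative a0: no firmware support */
        BP_NEEDED       =  0,  /* a0 == 0: install the workaround */
        BP_NOT_AFFECTED =  1,  /* a0 == 1: firmware says we're fine */
    };

    static enum bp_hardening classify(int smccc_a0)
    {
        switch (smccc_a0) {
        case 1:
            return BP_NOT_AFFECTED;
        case 0:
            return BP_NEEDED;
        default:
            return BP_UNSUPPORTED;
        }
    }

    int main(void)
    {
        printf("a0=1 -> %d (skip mitigation on this CPU)\n", classify(1));
        return 0;
    }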
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c |   32 +++++++++++++++++++++++---------
+ 1 file changed, 23 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -190,22 +190,36 @@ static int detect_harden_bp_fw(void)
+       case PSCI_CONDUIT_HVC:
+               arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+-              if ((int)res.a0 < 0)
++              switch ((int)res.a0) {
++              case 1:
++                      /* Firmware says we're just fine */
++                      return 0;
++              case 0:
++                      cb = call_hvc_arch_workaround_1;
++                      /* This is a guest, no need to patch KVM vectors */
++                      smccc_start = NULL;
++                      smccc_end = NULL;
++                      break;
++              default:
+                       return -1;
+-              cb = call_hvc_arch_workaround_1;
+-              /* This is a guest, no need to patch KVM vectors */
+-              smccc_start = NULL;
+-              smccc_end = NULL;
++              }
+               break;
+       case PSCI_CONDUIT_SMC:
+               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+-              if ((int)res.a0 < 0)
++              switch ((int)res.a0) {
++              case 1:
++                      /* Firmware says we're just fine */
++                      return 0;
++              case 0:
++                      cb = call_smc_arch_workaround_1;
++                      smccc_start = __smccc_workaround_1_smc_start;
++                      smccc_end = __smccc_workaround_1_smc_end;
++                      break;
++              default:
+                       return -1;
+-              cb = call_smc_arch_workaround_1;
+-              smccc_start = __smccc_workaround_1_smc_start;
+-              smccc_end = __smccc_workaround_1_smc_end;
++              }
+               break;
+       default:
diff --git a/queue-4.14/arm64-v8.4-support-for-new-floating-point-multiplication-instructions.patch b/queue-4.14/arm64-v8.4-support-for-new-floating-point-multiplication-instructions.patch
new file mode 100644 (file)
index 0000000..0ae3a03
--- /dev/null
@@ -0,0 +1,98 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:47:49 +0200
+Subject: arm64: v8.4: Support for new floating point multiplication instructions
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Dongjiu Geng <gengdongjiu@huawei.com>, Dave Martin <Dave.Martin@arm.com>
+Message-ID: <20191024124833.4158-5-ard.biesheuvel@linaro.org>
+
+From: Dongjiu Geng <gengdongjiu@huawei.com>
+
+[ Upstream commit 3b3b681097fae73b7f5dcdd42db6cfdf32943d4c ]
+
+ARM v8.4 extensions add new neon instructions for performing a
+multiplication of each FP16 element of one vector with the corresponding
+FP16 element of a second vector, and to add or subtract this without an
+intermediate rounding to the corresponding FP32 element in a third vector.
+
+This patch detects this feature and lets userspace know about it via a
+HWCAP bit and MRS emulation.
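
From userspace the feature is consumed through the auxiliary vector. A
small runnable example using the real getauxval() API; the
HWCAP_ASIMDFHM constant matches the uapi header above but is defined
locally so the sketch builds on hosts whose headers predate it:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_ASIMDFHM
    #define HWCAP_ASIMDFHM (1 << 23)   /* arm64 uapi hwcap bit */
    #endif

    int main(void)
    {
        if (getauxval(AT_HWCAP) & HWCAP_ASIMDFHM)
            printf("FP16 FMLAL/FMLSL instructions available\n");
        else
            printf("fall back to separate widening multiply + add\n");
        return 0;
    }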
+
+Cc: Dave Martin <Dave.Martin@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
+Reviewed-by: Dave Martin <Dave.Martin@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[ardb: fix up for missing SVE in context]
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/cpu-feature-registers.txt |    4 +++-
+ arch/arm64/include/asm/sysreg.h               |    1 +
+ arch/arm64/include/uapi/asm/hwcap.h           |    2 ++
+ arch/arm64/kernel/cpufeature.c                |    2 ++
+ arch/arm64/kernel/cpuinfo.c                   |    2 ++
+ 5 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/Documentation/arm64/cpu-feature-registers.txt
++++ b/Documentation/arm64/cpu-feature-registers.txt
+@@ -110,7 +110,9 @@ infrastructure:
+      x--------------------------------------------------x
+      | Name                         |  bits   | visible |
+      |--------------------------------------------------|
+-     | RES0                         | [63-48] |    n    |
++     | RES0                         | [63-52] |    n    |
++     |--------------------------------------------------|
++     | FHM                          | [51-48] |    y    |
+      |--------------------------------------------------|
+      | DP                           | [47-44] |    y    |
+      |--------------------------------------------------|
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -375,6 +375,7 @@
+ #define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS   BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
+ /* id_aa64isar0 */
++#define ID_AA64ISAR0_FHM_SHIFT                48
+ #define ID_AA64ISAR0_DP_SHIFT         44
+ #define ID_AA64ISAR0_SM4_SHIFT                40
+ #define ID_AA64ISAR0_SM3_SHIFT                36
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -42,5 +42,7 @@
+ #define HWCAP_SM4             (1 << 19)
+ #define HWCAP_ASIMDDP         (1 << 20)
+ #define HWCAP_SHA512          (1 << 21)
++#define HWCAP_SVE             (1 << 22)
++#define HWCAP_ASIMDFHM                (1 << 23)
+ #endif /* _UAPI__ASM_HWCAP_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -107,6 +107,7 @@ cpufeature_pan_not_uao(const struct arm6
+  * sync with the documentation of the CPU feature register ABI.
+  */
+ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
+@@ -1052,6 +1053,7 @@ static const struct arm64_cpu_capabiliti
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
++      HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -74,6 +74,8 @@ static const char *const hwcap_str[] = {
+       "sm4",
+       "asimddp",
+       "sha512",
++      "sve",
++      "asimdfhm",
+       NULL
+ };
diff --git a/queue-4.14/asoc-rsnd-reinitialize-bit-clock-inversion-flag-for-every-format-setting.patch b/queue-4.14/asoc-rsnd-reinitialize-bit-clock-inversion-flag-for-every-format-setting.patch
new file mode 100644 (file)
index 0000000..ccea385
--- /dev/null
@@ -0,0 +1,42 @@
+From 22e58665a01006d05f0239621f7d41cacca96cc4 Mon Sep 17 00:00:00 2001
+From: Junya Monden <jmonden@jp.adit-jv.com>
+Date: Wed, 16 Oct 2019 14:42:55 +0200
+Subject: ASoC: rsnd: Reinitialize bit clock inversion flag for every format setting
+
+From: Junya Monden <jmonden@jp.adit-jv.com>
+
+commit 22e58665a01006d05f0239621f7d41cacca96cc4 upstream.
+
+Unlike other format-related DAI parameters, the rdai->bit_clk_inv flag
+is not properly re-initialized when setting the format for new stream
+processing. The inversion, if requested, is then applied not to the
+default but to the previous value, which leads to the SCKP bit in the
+SSICR register being set incorrectly.
+Fix this by re-setting the flag to its initial value, determined by format.
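
The bug is a general stale-state pattern: a flag derived from the
requested format was applied relative to its previous value instead of
being recomputed from scratch. An illustrative model (names made up)
showing why the one-line reset matters:

    #include <stdbool.h>
    #include <stdio.h>

    struct dai { bool bit_clk_inv; };

    static void set_fmt(struct dai *d, bool fmt_requests_inversion)
    {
        d->bit_clk_inv = false;          /* the fix: reset to default */
        if (fmt_requests_inversion)
            d->bit_clk_inv = !d->bit_clk_inv;
    }

    int main(void)
    {
        struct dai d = { false };

        set_fmt(&d, true);   /* inverted */
        set_fmt(&d, true);   /* still inverted; without the reset this
                              * second call would flip back to normal */
        printf("bit_clk_inv=%d\n", d.bit_clk_inv);
        return 0;
    }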
+
+Fixes: 1a7889ca8aba3 ("ASoC: rsnd: fixup SND_SOC_DAIFMT_xB_xF behavior")
+Cc: Andrew Gabbasov <andrew_gabbasov@mentor.com>
+Cc: Jiada Wang <jiada_wang@mentor.com>
+Cc: Timo Wischer <twischer@de.adit-jv.com>
+Cc: stable@vger.kernel.org # v3.17+
+Signed-off-by: Junya Monden <jmonden@jp.adit-jv.com>
+Signed-off-by: Eugeniu Rosca <erosca@de.adit-jv.com>
+Acked-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Link: https://lore.kernel.org/r/20191016124255.7442-1-erosca@de.adit-jv.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/sh/rcar/core.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -676,6 +676,7 @@ static int rsnd_soc_dai_set_fmt(struct s
+       }
+       /* set format */
++      rdai->bit_clk_inv = 0;
+       switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               rdai->sys_delay = 0;
diff --git a/queue-4.14/cfg80211-wext-avoid-copying-malformed-ssids.patch b/queue-4.14/cfg80211-wext-avoid-copying-malformed-ssids.patch
new file mode 100644 (file)
index 0000000..c74e87f
--- /dev/null
@@ -0,0 +1,56 @@
+From 4ac2813cc867ae563a1ba5a9414bfb554e5796fa Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Fri, 4 Oct 2019 10:51:32 +0100
+Subject: cfg80211: wext: avoid copying malformed SSIDs
+
+From: Will Deacon <will@kernel.org>
+
+commit 4ac2813cc867ae563a1ba5a9414bfb554e5796fa upstream.
+
+Ensure the SSID element is bounds-checked prior to invoking memcpy()
+with its length field, when copying to userspace.
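
The hardening pattern is the usual one for attacker-influenced length
fields: validate against the destination bound and fail, rather than
copy and hope. A self-contained sketch, with ESSID_MAX standing in for
IW_ESSID_MAX_SIZE:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define ESSID_MAX 32   /* stands in for IW_ESSID_MAX_SIZE */

    static int copy_ssid(char *dst, const unsigned char *ie)
    {
        size_t len = ie[1];        /* length byte of the SSID element */

        if (len > ESSID_MAX)
            return -EINVAL;        /* malformed element: reject it */
        memcpy(dst, ie + 2, len);
        return (int)len;
    }

    int main(void)
    {
        const unsigned char ie[] = { 0x00, 4, 'h', 'o', 'm', 'e' };
        char ssid[ESSID_MAX] = { 0 };

        printf("copied %d bytes\n", copy_ssid(ssid, ie));
        return 0;
    }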
+
+Cc: <stable@vger.kernel.org>
+Cc: Kees Cook <keescook@chromium.org>
+Reported-by: Nicolas Waisman <nico@semmle.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20191004095132.15777-2-will@kernel.org
+[adjust commit log a bit]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/wireless/wext-sme.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/wireless/wext-sme.c
++++ b/net/wireless/wext-sme.c
+@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct ne
+                              struct iw_point *data, char *ssid)
+ {
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
++      int ret = 0;
+       /* call only for station! */
+       if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct ne
+               if (ie) {
+                       data->flags = 1;
+                       data->length = ie[1];
+-                      memcpy(ssid, ie + 2, data->length);
++                      if (data->length > IW_ESSID_MAX_SIZE)
++                              ret = -EINVAL;
++                      else
++                              memcpy(ssid, ie + 2, data->length);
+               }
+               rcu_read_unlock();
+       } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
+@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct ne
+       }
+       wdev_unlock(wdev);
+-      return 0;
++      return ret;
+ }
+ int cfg80211_mgd_wext_siwap(struct net_device *dev,
diff --git a/queue-4.14/drivers-base-memory.c-don-t-access-uninitialized-memmaps-in-soft_offline_page_store.patch b/queue-4.14/drivers-base-memory.c-don-t-access-uninitialized-memmaps-in-soft_offline_page_store.patch
new file mode 100644 (file)
index 0000000..7cb80c6
--- /dev/null
@@ -0,0 +1,55 @@
+From 641fe2e9387a36f9ee01d7c69382d1fe147a5e98 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Fri, 18 Oct 2019 20:19:16 -0700
+Subject: drivers/base/memory.c: don't access uninitialized memmaps in soft_offline_page_store()
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 641fe2e9387a36f9ee01d7c69382d1fe147a5e98 upstream.
+
+Uninitialized memmaps contain garbage and in the worst case trigger kernel
+BUGs, especially with CONFIG_PAGE_POISONING.  They should not get touched.
+
+Right now, when trying to soft-offline a PFN that resides on a memory
+block that was never onlined, one gets a misleading error with
+CONFIG_PAGE_POISONING:
+
+  :/# echo 5637144576 > /sys/devices/system/memory/soft_offline_page
+  [   23.097167] soft offline: 0x150000 page already poisoned
+
+But the actual result depends on the garbage in the memmap.
+
+soft_offline_page() can only work with online pages; it returns -EIO in
+case of ZONE_DEVICE.  Make sure to only forward pages that are online
+(iow, managed by the buddy) and, therefore, have an initialized memmap.
+
+Add a check against pfn_to_online_page() and similarly return -EIO.
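
The distinction the patch relies on: pfn_valid() only promises that a
memmap entry exists, while pfn_to_online_page() additionally promises
it was initialized. A toy userspace model of that difference (the real
helpers of course operate on kernel memmaps):

    #include <stdbool.h>
    #include <stdio.h>

    struct page { bool initialized; };

    /* Entry 1 exists ("valid") but was never onlined/initialized. */
    static struct page memmap[4] = { { true }, { false } };

    static struct page *pfn_to_online_page(unsigned long pfn)
    {
        if (pfn >= 4 || !memmap[pfn].initialized)
            return NULL;   /* offline: memmap may be garbage */
        return &memmap[pfn];
    }

    int main(void)
    {
        printf("pfn 1 safe to touch? %s\n",
               pfn_to_online_page(1) ? "yes" : "no");
        return 0;
    }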
+
+Link: http://lkml.kernel.org/r/20191010141200.8985-1-david@redhat.com
+Fixes: f1dd2cd13c4b ("mm, memory_hotplug: do not associate hotadded memory to zones until online")     [visible after d0dc12e86b319]
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: "Rafael J. Wysocki" <rafael@kernel.org>
+Cc: <stable@vger.kernel.org>   [4.13+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/memory.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -552,6 +552,9 @@ store_soft_offline_page(struct device *d
+       pfn >>= PAGE_SHIFT;
+       if (!pfn_valid(pfn))
+               return -ENXIO;
++      /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
++      if (!pfn_to_online_page(pfn))
++              return -EIO;
+       ret = soft_offline_page(pfn_to_page(pfn), 0);
+       return ret == 0 ? count : ret;
+ }
diff --git a/queue-4.14/drm-amdgpu-bail-earlier-when-amdgpu.cik_-si_support-is-not-set-to-1.patch b/queue-4.14/drm-amdgpu-bail-earlier-when-amdgpu.cik_-si_support-is-not-set-to-1.patch
new file mode 100644 (file)
index 0000000..c28a162
--- /dev/null
@@ -0,0 +1,123 @@
+From 984d7a929ad68b7be9990fc9c5cfa5d5c9fc7942 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 10 Oct 2019 18:28:17 +0200
+Subject: drm/amdgpu: Bail earlier when amdgpu.cik_/si_support is not set to 1
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 984d7a929ad68b7be9990fc9c5cfa5d5c9fc7942 upstream.
+
+Bail from the pci_driver probe function instead of from the drm_driver
+load function.
+
+This avoids /dev/dri/card0 temporarily getting registered and then
+unregistered again, sending unwanted add / remove udev events to
+userspace.
+
+Specifically this avoids triggering the (userspace) bug fixed by this
+plymouth merge-request:
+https://gitlab.freedesktop.org/plymouth/plymouth/merge_requests/59
+
+Note that despite that being a userspace bug, not sending unnecessary
+udev events is a good idea in general.
+
+BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1490490
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |   35 ++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |   35 --------------------------------
+ 2 files changed, 35 insertions(+), 35 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -572,6 +572,41 @@ static int amdgpu_pci_probe(struct pci_d
+       if (ret == -EPROBE_DEFER)
+               return ret;
++#ifdef CONFIG_DRM_AMDGPU_SI
++      if (!amdgpu_si_support) {
++              switch (flags & AMD_ASIC_MASK) {
++              case CHIP_TAHITI:
++              case CHIP_PITCAIRN:
++              case CHIP_VERDE:
++              case CHIP_OLAND:
++              case CHIP_HAINAN:
++                      dev_info(&pdev->dev,
++                               "SI support provided by radeon.\n");
++                      dev_info(&pdev->dev,
++                               "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
++                              );
++                      return -ENODEV;
++              }
++      }
++#endif
++#ifdef CONFIG_DRM_AMDGPU_CIK
++      if (!amdgpu_cik_support) {
++              switch (flags & AMD_ASIC_MASK) {
++              case CHIP_KAVERI:
++              case CHIP_BONAIRE:
++              case CHIP_HAWAII:
++              case CHIP_KABINI:
++              case CHIP_MULLINS:
++                      dev_info(&pdev->dev,
++                               "CIK support provided by radeon.\n");
++                      dev_info(&pdev->dev,
++                               "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
++                              );
++                      return -ENODEV;
++              }
++      }
++#endif
++
+       /* Get rid of things like offb */
+       ret = amdgpu_kick_out_firmware_fb(pdev);
+       if (ret)
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -87,41 +87,6 @@ int amdgpu_driver_load_kms(struct drm_de
+       struct amdgpu_device *adev;
+       int r, acpi_status;
+-#ifdef CONFIG_DRM_AMDGPU_SI
+-      if (!amdgpu_si_support) {
+-              switch (flags & AMD_ASIC_MASK) {
+-              case CHIP_TAHITI:
+-              case CHIP_PITCAIRN:
+-              case CHIP_VERDE:
+-              case CHIP_OLAND:
+-              case CHIP_HAINAN:
+-                      dev_info(dev->dev,
+-                               "SI support provided by radeon.\n");
+-                      dev_info(dev->dev,
+-                               "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+-                              );
+-                      return -ENODEV;
+-              }
+-      }
+-#endif
+-#ifdef CONFIG_DRM_AMDGPU_CIK
+-      if (!amdgpu_cik_support) {
+-              switch (flags & AMD_ASIC_MASK) {
+-              case CHIP_KAVERI:
+-              case CHIP_BONAIRE:
+-              case CHIP_HAWAII:
+-              case CHIP_KABINI:
+-              case CHIP_MULLINS:
+-                      dev_info(dev->dev,
+-                               "CIK support provided by radeon.\n");
+-                      dev_info(dev->dev,
+-                               "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+-                              );
+-                      return -ENODEV;
+-              }
+-      }
+-#endif
+-
+       adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
+       if (adev == NULL) {
+               return -ENOMEM;
diff --git a/queue-4.14/drm-edid-add-6-bpc-quirk-for-sdc-panel-in-lenovo-g50.patch b/queue-4.14/drm-edid-add-6-bpc-quirk-for-sdc-panel-in-lenovo-g50.patch
new file mode 100644 (file)
index 0000000..11d7203
--- /dev/null
@@ -0,0 +1,35 @@
+From 11bcf5f78905b90baae8fb01e16650664ed0cb00 Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Tue, 2 Apr 2019 11:30:37 +0800
+Subject: drm/edid: Add 6 bpc quirk for SDC panel in Lenovo G50
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit 11bcf5f78905b90baae8fb01e16650664ed0cb00 upstream.
+
+Another panel that needs 6BPC quirk.
+
+BugLink: https://bugs.launchpad.net/bugs/1819968
+Cc: <stable@vger.kernel.org> # v4.8+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190402033037.21877-1-kai.heng.feng@canonical.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_edid.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -164,6 +164,9 @@ static const struct edid_quirk {
+       /* Medion MD 30217 PG */
+       { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
++      /* Lenovo G50 */
++      { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
++
+       /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+       { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
diff --git a/queue-4.14/fs-proc-page.c-don-t-access-uninitialized-memmaps-in-fs-proc-page.c.patch b/queue-4.14/fs-proc-page.c-don-t-access-uninitialized-memmaps-in-fs-proc-page.c.patch
new file mode 100644 (file)
index 0000000..834b8f7
--- /dev/null
@@ -0,0 +1,147 @@
+From aad5f69bc161af489dbb5934868bd347282f0764 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Fri, 18 Oct 2019 20:19:20 -0700
+Subject: fs/proc/page.c: don't access uninitialized memmaps in fs/proc/page.c
+
+From: David Hildenbrand <david@redhat.com>
+
+commit aad5f69bc161af489dbb5934868bd347282f0764 upstream.
+
+There are three places where we access uninitialized memmaps, namely:
+- /proc/kpagecount
+- /proc/kpageflags
+- /proc/kpagecgroup
+
+We have initialized memmaps either when the section is online or when the
+page was initialized for ZONE_DEVICE.  Uninitialized memmaps contain
+garbage and in the worst case trigger kernel BUGs, especially with
+CONFIG_PAGE_POISONING.
+
+For example, not onlining a DIMM during boot and calling /proc/kpagecount
+with CONFIG_PAGE_POISONING:
+
+  :/# cat /proc/kpagecount > tmp.test
+  BUG: unable to handle page fault for address: fffffffffffffffe
+  #PF: supervisor read access in kernel mode
+  #PF: error_code(0x0000) - not-present page
+  PGD 114616067 P4D 114616067 PUD 114618067 PMD 0
+  Oops: 0000 [#1] SMP NOPTI
+  CPU: 0 PID: 469 Comm: cat Not tainted 5.4.0-rc1-next-20191004+ #11
+  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.4
+  RIP: 0010:kpagecount_read+0xce/0x1e0
+  Code: e8 09 83 e0 3f 48 0f a3 02 73 2d 4c 89 e7 48 c1 e7 06 48 03 3d ab 51 01 01 74 1d 48 8b 57 08 480
+  RSP: 0018:ffffa14e409b7e78 EFLAGS: 00010202
+  RAX: fffffffffffffffe RBX: 0000000000020000 RCX: 0000000000000000
+  RDX: 0000000000000001 RSI: 00007f76b5595000 RDI: fffff35645000000
+  RBP: 00007f76b5595000 R08: 0000000000000001 R09: 0000000000000000
+  R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000140000
+  R13: 0000000000020000 R14: 00007f76b5595000 R15: ffffa14e409b7f08
+  FS:  00007f76b577d580(0000) GS:ffff8f41bd400000(0000) knlGS:0000000000000000
+  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+  CR2: fffffffffffffffe CR3: 0000000078960000 CR4: 00000000000006f0
+  Call Trace:
+   proc_reg_read+0x3c/0x60
+   vfs_read+0xc5/0x180
+   ksys_read+0x68/0xe0
+   do_syscall_64+0x5c/0xa0
+   entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+For now, let's drop support for ZONE_DEVICE from the three pseudo files
+in order to fix this.  To distinguish offline memory (with garbage
+memmap) from ZONE_DEVICE memory with properly initialized memmaps, we
+would have to check get_dev_pagemap() and pfn_zone_device_reserved()
+right now.  The usage of both (especially, special casing devmem) is
+frowned upon and needs to be reworked.
+
+The fundamental issue we have is:
+
+       if (pfn_to_online_page(pfn)) {
+               /* memmap initialized */
+       } else if (pfn_valid(pfn)) {
+               /*
+                * ???
+                * a) offline memory. memmap garbage.
+                * b) devmem: memmap initialized to ZONE_DEVICE.
+                * c) devmem: reserved for driver. memmap garbage.
+                * (d) devmem: memmap currently initializing - garbage)
+                */
+       }
+
+We'll leave the pfn_zone_device_reserved() check in stable_page_flags()
+in place as that function is also used from memory failure.  We now no
+longer dump information about pages that are not in use anymore -
+offline.
+
+Link: http://lkml.kernel.org/r/20191009142435.3975-2-david@redhat.com
+Fixes: f1dd2cd13c4b ("mm, memory_hotplug: do not associate hotadded memory to zones until online")     [visible after d0dc12e86b319]
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reported-by: Qian Cai <cai@lca.pw>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Cc: Stephen Rothwell <sfr@canb.auug.org.au>
+Cc: Toshiki Fukasawa <t-fukasawa@vx.jp.nec.com>
+Cc: Pankaj gupta <pagupta@redhat.com>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Anthony Yznaga <anthony.yznaga@oracle.com>
+Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Cc: <stable@vger.kernel.org>   [4.13+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/proc/page.c |   28 ++++++++++++++++------------
+ 1 file changed, 16 insertions(+), 12 deletions(-)
+
+--- a/fs/proc/page.c
++++ b/fs/proc/page.c
+@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct fi
+               return -EINVAL;
+       while (count > 0) {
+-              if (pfn_valid(pfn))
+-                      ppage = pfn_to_page(pfn);
+-              else
+-                      ppage = NULL;
++              /*
++               * TODO: ZONE_DEVICE support requires to identify
++               * memmaps that were actually initialized.
++               */
++              ppage = pfn_to_online_page(pfn);
++
+               if (!ppage || PageSlab(ppage))
+                       pcount = 0;
+               else
+@@ -214,10 +216,11 @@ static ssize_t kpageflags_read(struct fi
+               return -EINVAL;
+       while (count > 0) {
+-              if (pfn_valid(pfn))
+-                      ppage = pfn_to_page(pfn);
+-              else
+-                      ppage = NULL;
++              /*
++               * TODO: ZONE_DEVICE support requires to identify
++               * memmaps that were actually initialized.
++               */
++              ppage = pfn_to_online_page(pfn);
+               if (put_user(stable_page_flags(ppage), out)) {
+                       ret = -EFAULT;
+@@ -259,10 +262,11 @@ static ssize_t kpagecgroup_read(struct f
+               return -EINVAL;
+       while (count > 0) {
+-              if (pfn_valid(pfn))
+-                      ppage = pfn_to_page(pfn);
+-              else
+-                      ppage = NULL;
++              /*
++               * TODO: ZONE_DEVICE support requires to identify
++               * memmaps that were actually initialized.
++               */
++              ppage = pfn_to_online_page(pfn);
+               if (ppage)
+                       ino = page_cgroup_ino(ppage);
diff --git a/queue-4.14/input-da9063-fix-capability-and-drop-key_sleep.patch b/queue-4.14/input-da9063-fix-capability-and-drop-key_sleep.patch
new file mode 100644 (file)
index 0000000..f5cdf9c
--- /dev/null
@@ -0,0 +1,41 @@
+From afce285b859cea91c182015fc9858ea58c26cd0e Mon Sep 17 00:00:00 2001
+From: Marco Felsch <m.felsch@pengutronix.de>
+Date: Mon, 16 Sep 2019 12:45:48 -0700
+Subject: Input: da9063 - fix capability and drop KEY_SLEEP
+
+From: Marco Felsch <m.felsch@pengutronix.de>
+
+commit afce285b859cea91c182015fc9858ea58c26cd0e upstream.
+
+Since commit f889beaaab1c ("Input: da9063 - report KEY_POWER instead of
+KEY_SLEEP during power key-press") KEY_SLEEP isn't supported anymore. This
+caused input device to not generate any events if "dlg,disable-key-power"
+is set.
+
+Fix this by unconditionally setting KEY_POWER capability, and not
+declaring KEY_SLEEP.
+
+Fixes: f889beaaab1c ("Input: da9063 - report KEY_POWER instead of KEY_SLEEP during power key-press")
+Signed-off-by: Marco Felsch <m.felsch@pengutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/misc/da9063_onkey.c |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/drivers/input/misc/da9063_onkey.c
++++ b/drivers/input/misc/da9063_onkey.c
+@@ -248,10 +248,7 @@ static int da9063_onkey_probe(struct pla
+       onkey->input->phys = onkey->phys;
+       onkey->input->dev.parent = &pdev->dev;
+-      if (onkey->key_power)
+-              input_set_capability(onkey->input, EV_KEY, KEY_POWER);
+-
+-      input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
++      input_set_capability(onkey->input, EV_KEY, KEY_POWER);
+       INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
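
For context, the consumer-visible effect of the fix: the power-key
capability is declared once at probe time, and every event is reported
as KEY_POWER regardless of the "dlg,disable-key-power" property. A
minimal sketch using the standard input API (the handler context is
illustrative, not the driver's actual poll routine):

    /* at probe time: declare the capability unconditionally */
    input_set_capability(onkey->input, EV_KEY, KEY_POWER);

    /* later, from the poll/IRQ path: report press and release */
    input_report_key(onkey->input, KEY_POWER, 1);
    input_sync(onkey->input);
    input_report_key(onkey->input, KEY_POWER, 0);
    input_sync(onkey->input);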
diff --git a/queue-4.14/input-synaptics-rmi4-avoid-processing-unknown-irqs.patch b/queue-4.14/input-synaptics-rmi4-avoid-processing-unknown-irqs.patch
new file mode 100644
index 0000000..ba772ea
--- /dev/null
@@ -0,0 +1,68 @@
+From 363c53875aef8fce69d4a2d0873919ccc7d9e2ad Mon Sep 17 00:00:00 2001
+From: Evan Green <evgreen@chromium.org>
+Date: Fri, 11 Oct 2019 17:22:09 -0700
+Subject: Input: synaptics-rmi4 - avoid processing unknown IRQs
+
+From: Evan Green <evgreen@chromium.org>
+
+commit 363c53875aef8fce69d4a2d0873919ccc7d9e2ad upstream.
+
+rmi_process_interrupt_requests() calls handle_nested_irq() for
+each interrupt status bit it finds. If the irq domain mapping for
+this bit had not yet been set up, then it ends up calling
+handle_nested_irq(0), which causes a NULL pointer dereference.
+
+There's already code that masks the irq_status bits coming out of the
+hardware with current_irq_mask, presumably to avoid this situation.
+However, current_irq_mask appears to track the mask actually set in
+the hardware rather than the IRQs software has set up and registered
+for. For example, in rmi_driver_reset_handler(), the current_irq_mask
+is initialized based on what is read from the hardware. If the reset
+value of this mask enables IRQs that Linux has not set up yet, then
+we end up in this situation.
+
+There appears to be a third unused bitmask that used to serve this
+purpose, fn_irq_bits. Use that bitmask instead of current_irq_mask
+to avoid calling handle_nested_irq() on IRQs that have not yet been
+set up.
+
+Signed-off-by: Evan Green <evgreen@chromium.org>
+Reviewed-by: Andrew Duggan <aduggan@synaptics.com>
+Link: https://lore.kernel.org/r/20191008223657.163366-1-evgreen@chromium.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/rmi4/rmi_driver.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/input/rmi4/rmi_driver.c
++++ b/drivers/input/rmi4/rmi_driver.c
+@@ -165,7 +165,7 @@ static int rmi_process_interrupt_request
+       }
+       mutex_lock(&data->irq_mutex);
+-      bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
++      bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
+              data->irq_count);
+       /*
+        * At this point, irq_status has all bits that are set in the
+@@ -412,6 +412,8 @@ static int rmi_driver_set_irq_bits(struc
+       bitmap_copy(data->current_irq_mask, data->new_irq_mask,
+                   data->num_of_irq_regs);
++      bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
++
+ error_unlock:
+       mutex_unlock(&data->irq_mutex);
+       return error;
+@@ -425,6 +427,8 @@ static int rmi_driver_clear_irq_bits(str
+       struct device *dev = &rmi_dev->dev;
+       mutex_lock(&data->irq_mutex);
++      bitmap_andnot(data->fn_irq_bits,
++                    data->fn_irq_bits, mask, data->irq_count);
+       bitmap_andnot(data->new_irq_mask,
+                 data->current_irq_mask, mask, data->irq_count);
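
A simplified rendering of the dispatch path after the change (field
names follow the driver; the loop body condenses what
rmi_process_interrupt_requests() does rather than quoting it):

    unsigned int i, virq;

    /* Keep only status bits for IRQs software has registered. */
    bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
               data->irq_count);

    for_each_set_bit(i, data->irq_status, data->irq_count) {
            virq = irq_find_mapping(data->irqdomain, i);
            if (virq)
                    handle_nested_irq(virq); /* mapping known to exist */
    }

Because fn_irq_bits is only populated by rmi_driver_set_irq_bits(),
handle_nested_irq(0) can no longer be reached for unmapped bits.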
diff --git a/queue-4.14/kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch b/queue-4.14/kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch
new file mode 100644
index 0000000..ff44282
--- /dev/null
@@ -0,0 +1,69 @@
+From foo@baz Sun 27 Oct 2019 09:50:54 AM CET
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Oct 2019 14:48:19 +0200
+Subject: KVM: arm64: Set SCTLR_EL2.DSSBS if SSBD is forcefully disabled and !vhe
+To: stable@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, Marc Zyngier <maz@kernel.org>, Mark Rutland <mark.rutland@arm.com>, Suzuki K Poulose <suzuki.poulose@arm.com>, Jeremy Linton <jeremy.linton@arm.com>, Andre Przywara <andre.przywara@arm.com>, Alexandru Elisei <alexandru.elisei@arm.com>, Will Deacon <will.deacon@arm.com>, Christoffer Dall <christoffer.dall@arm.com>
+Message-ID: <20191024124833.4158-35-ard.biesheuvel@linaro.org>
+
+From: Will Deacon <will.deacon@arm.com>
+
+[ Upstream commit 7c36447ae5a090729e7b129f24705bb231a07e0b ]
+
+When running without VHE, it is necessary to set SCTLR_EL2.DSSBS if SSBD
+has been forcefully disabled on the kernel command-line.
+
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h |   11 +++++++++++
+ arch/arm64/kvm/hyp/sysreg-sr.c    |   11 +++++++++++
+ 2 files changed, 22 insertions(+)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -356,6 +356,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struc
+ void __kvm_set_tpidr_el2(u64 tpidr_el2);
+ DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
++void __kvm_enable_ssbs(void);
++
+ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+                                      unsigned long hyp_stack_ptr,
+                                      unsigned long vector_ptr)
+@@ -380,6 +382,15 @@ static inline void __cpu_init_hyp_mode(p
+               - (u64)kvm_ksym_ref(kvm_host_cpu_state);
+       kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
++
++      /*
++       * Disabling SSBD on a non-VHE system requires us to enable SSBS
++       * at EL2.
++       */
++      if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
++          arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
++              kvm_call_hyp(__kvm_enable_ssbs);
++      }
+ }
+ static inline void kvm_arch_hardware_unsetup(void) {}
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -188,3 +188,14 @@ void __hyp_text __kvm_set_tpidr_el2(u64
+ {
+       asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
+ }
++
++void __hyp_text __kvm_enable_ssbs(void)
++{
++      u64 tmp;
++
++      asm volatile(
++      "mrs    %0, sctlr_el2\n"
++      "orr    %0, %0, %1\n"
++      "msr    sctlr_el2, %0"
++      : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
++}
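
The hunk above is a plain read-modify-write of SCTLR_EL2. Roughly the
same idiom, unrolled one step per statement (a sketch, not the patch's
exact constraints; the original folds it into a single asm block with
an "L" logical-immediate operand):

    u64 val;

    asm volatile("mrs %0, sctlr_el2" : "=r" (val)); /* read             */
    val |= SCTLR_ELx_DSSBS;                         /* set the DSSBS bit */
    asm volatile("msr sctlr_el2, %0" :: "r" (val)); /* write back       */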
diff --git a/queue-4.14/mac80211-reject-malformed-ssid-elements.patch b/queue-4.14/mac80211-reject-malformed-ssid-elements.patch
new file mode 100644
index 0000000..60aac2a
--- /dev/null
@@ -0,0 +1,46 @@
+From 4152561f5da3fca92af7179dd538ea89e248f9d0 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Fri, 4 Oct 2019 10:51:31 +0100
+Subject: mac80211: Reject malformed SSID elements
+
+From: Will Deacon <will@kernel.org>
+
+commit 4152561f5da3fca92af7179dd538ea89e248f9d0 upstream.
+
+Although this shouldn't occur in practice, it's a good idea to bounds
+check the length field of the SSID element prior to using it for things
+like allocations or memcpy operations.
+
+Cc: <stable@vger.kernel.org>
+Cc: Kees Cook <keescook@chromium.org>
+Reported-by: Nicolas Waisman <nico@semmle.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20191004095132.15777-1-will@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/mlme.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2430,7 +2430,8 @@ struct sk_buff *ieee80211_ap_probereq_ge
+       rcu_read_lock();
+       ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
+-      if (WARN_ON_ONCE(ssid == NULL))
++      if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
++                    "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
+               ssid_len = 0;
+       else
+               ssid_len = ssid[1];
+@@ -4756,7 +4757,7 @@ int ieee80211_mgd_assoc(struct ieee80211
+       rcu_read_lock();
+       ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
+-      if (!ssidie) {
++      if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
+               rcu_read_unlock();
+               kfree(assoc_data);
+               return -EINVAL;
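
For context, 802.11 information elements are TLV-encoded: ie[0] is the
element ID, ie[1] the payload length, and the payload starts at ie[2].
A compact sketch of the check both hunks add (buffer names are
illustrative; IEEE80211_MAX_SSID_LEN is 32):

    const u8 *ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
    u8 buf[IEEE80211_MAX_SSID_LEN];
    size_t ssid_len = 0;

    if (ssid && ssid[1] <= IEEE80211_MAX_SSID_LEN) {
            ssid_len = ssid[1];
            memcpy(buf, ssid + 2, ssid_len); /* payload begins at ie + 2 */
    }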
diff --git a/queue-4.14/mips-tlbex-fix-build_restore_pagemask-kscratch-restore.patch b/queue-4.14/mips-tlbex-fix-build_restore_pagemask-kscratch-restore.patch
new file mode 100644
index 0000000..c6001d6
--- /dev/null
@@ -0,0 +1,105 @@
+From b42aa3fd5957e4daf4b69129e5ce752a2a53e7d6 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paulburton@kernel.org>
+Date: Fri, 18 Oct 2019 15:38:48 -0700
+Subject: MIPS: tlbex: Fix build_restore_pagemask KScratch restore
+
+From: Paul Burton <paulburton@kernel.org>
+
+commit b42aa3fd5957e4daf4b69129e5ce752a2a53e7d6 upstream.
+
+build_restore_pagemask() will restore the value of register $1/$at when
+its restore_scratch argument is non-zero, and aims to do so by filling a
+branch delay slot. Commit 0b24cae4d535 ("MIPS: Add missing EHB in mtc0
+-> mfc0 sequence.") added an EHB instruction (Execution Hazard Barrier)
+prior to restoring $1 from a KScratch register, in order to resolve a
+hazard that can result in stale values of the KScratch register being
+observed. In particular, P-class CPUs from MIPS with out-of-order
+execution pipelines, such as the P5600 & P6600, are affected.
+
+Unfortunately this EHB instruction was inserted in the branch delay slot
+causing the MFC0 instruction which performs the restoration to no longer
+execute along with the branch. The result is that the $1 register isn't
+actually restored, i.e. the TLB refill exception handler clobbers it -
+which is exactly the problem the EHB is meant to avoid for the P-class
+CPUs.
+
+Similarly build_get_pgd_vmalloc() will restore the value of $1/$at when
+its mode argument equals refill_scratch, and suffers from the same
+problem.
+
+Fix this by moving the EHB earlier in the emitted code in both cases.
+There's no reason it needs to immediately precede the MFC0 - it simply
+needs to be between the MTC0 & MFC0.
+
+This bug only affects Cavium Octeon systems which use
+build_fast_tlb_refill_handler().
+
+Signed-off-by: Paul Burton <paulburton@kernel.org>
+Fixes: 0b24cae4d535 ("MIPS: Add missing EHB in mtc0 -> mfc0 sequence.")
+Cc: Dmitry Korotin <dkorotin@wavecomp.com>
+Cc: stable@vger.kernel.org # v3.15+
+Cc: linux-mips@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/mm/tlbex.c |   23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -658,6 +658,13 @@ static void build_restore_pagemask(u32 *
+                                  int restore_scratch)
+ {
+       if (restore_scratch) {
++              /*
++               * Ensure the MFC0 below observes the value written to the
++               * KScratch register by the prior MTC0.
++               */
++              if (scratch_reg >= 0)
++                      uasm_i_ehb(p);
++
+               /* Reset default page size */
+               if (PM_DEFAULT_MASK >> 16) {
+                       uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+@@ -672,12 +679,10 @@ static void build_restore_pagemask(u32 *
+                       uasm_i_mtc0(p, 0, C0_PAGEMASK);
+                       uasm_il_b(p, r, lid);
+               }
+-              if (scratch_reg >= 0) {
+-                      uasm_i_ehb(p);
++              if (scratch_reg >= 0)
+                       UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+-              } else {
++              else
+                       UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+-              }
+       } else {
+               /* Reset default page size */
+               if (PM_DEFAULT_MASK >> 16) {
+@@ -926,6 +931,10 @@ build_get_pgd_vmalloc64(u32 **p, struct
+       }
+       if (mode != not_refill && check_for_high_segbits) {
+               uasm_l_large_segbits_fault(l, *p);
++
++              if (mode == refill_scratch && scratch_reg >= 0)
++                      uasm_i_ehb(p);
++
+               /*
+                * We get here if we are an xsseg address, or if we are
+                * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
+@@ -942,12 +951,10 @@ build_get_pgd_vmalloc64(u32 **p, struct
+               uasm_i_jr(p, ptr);
+               if (mode == refill_scratch) {
+-                      if (scratch_reg >= 0) {
+-                              uasm_i_ehb(p);
++                      if (scratch_reg >= 0)
+                               UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+-                      } else {
++                      else
+                               UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+-                      }
+               } else {
+                       uasm_i_nop(p);
+               }
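
The emitted instruction ordering the patch enforces, sketched as the
corresponding uasm calls (condensed from the hunks above; the p, r,
lid and scratch_reg arguments follow the surrounding code):

    if (scratch_reg >= 0)
            uasm_i_ehb(p);          /* hazard barrier: MTC0 -> MFC0 */

    /* ... restore the default page mask ... */

    uasm_il_b(p, r, lid);           /* branch                       */
    UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
                                    /* delay slot: executes with the
                                       branch, restoring $1         */

With the EHB hoisted out of the delay slot, the MFC0 is what executes
alongside the branch, so $1 really is restored.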
diff --git a/queue-4.14/scsi-ch-make-it-possible-to-open-a-ch-device-multiple-times-again.patch b/queue-4.14/scsi-ch-make-it-possible-to-open-a-ch-device-multiple-times-again.patch
new file mode 100644
index 0000000..d4758bb
--- /dev/null
@@ -0,0 +1,49 @@
+From 6a0990eaa768dfb7064f06777743acc6d392084b Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Wed, 9 Oct 2019 10:35:36 -0700
+Subject: scsi: ch: Make it possible to open a ch device multiple times again
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit 6a0990eaa768dfb7064f06777743acc6d392084b upstream.
+
+Clearing ch->device in ch_release() is wrong because that pointer must
+remain valid until ch_remove() is called. This patch fixes the following
+crash the second time a ch device is opened:
+
+BUG: kernel NULL pointer dereference, address: 0000000000000790
+RIP: 0010:scsi_device_get+0x5/0x60
+Call Trace:
+ ch_open+0x4c/0xa0 [ch]
+ chrdev_open+0xa2/0x1c0
+ do_dentry_open+0x13a/0x380
+ path_openat+0x591/0x1470
+ do_filp_open+0x91/0x100
+ do_sys_open+0x184/0x220
+ do_syscall_64+0x5f/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Fixes: 085e56766f74 ("scsi: ch: add refcounting")
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191009173536.247889-1-bvanassche@acm.org
+Reported-by: Rob Turk <robtu@rtist.nl>
+Suggested-by: Rob Turk <robtu@rtist.nl>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ch.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/scsi/ch.c
++++ b/drivers/scsi/ch.c
+@@ -578,7 +578,6 @@ ch_release(struct inode *inode, struct f
+       scsi_changer *ch = file->private_data;
+       scsi_device_put(ch->device);
+-      ch->device = NULL;
+       file->private_data = NULL;
+       kref_put(&ch->ref, ch_destroy);
+       return 0;
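
The patched ch_release(), annotated with the lifetime rule the fix
restores (reconstructed from the hunk and its context lines):

    static int ch_release(struct inode *inode, struct file *file)
    {
            scsi_changer *ch = file->private_data;

            scsi_device_put(ch->device);    /* drop this open's reference */
            /* ch->device is deliberately left set: it must stay valid
             * for later ch_open() calls, until ch_remove() tears the
             * object down. */
            file->private_data = NULL;
            kref_put(&ch->ref, ch_destroy); /* frees only on the last ref */
            return 0;
    }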
diff --git a/queue-4.14/scsi-core-save-restore-command-resid-for-error-handling.patch b/queue-4.14/scsi-core-save-restore-command-resid-for-error-handling.patch
new file mode 100644
index 0000000..a4eb123
--- /dev/null
@@ -0,0 +1,73 @@
+From 8f8fed0cdbbd6cdbf28d9ebe662f45765d2f7d39 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Tue, 1 Oct 2019 16:48:39 +0900
+Subject: scsi: core: save/restore command resid for error handling
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit 8f8fed0cdbbd6cdbf28d9ebe662f45765d2f7d39 upstream.
+
+When a non-passthrough command is terminated with CHECK CONDITION, request
+sense is executed by hijacking the command descriptor. Since
+scsi_eh_prep_cmnd() and scsi_eh_restore_cmnd() do not save/restore the
+original command resid, the value returned on failure of the original
+command is lost and replaced with the value set by the execution of the
+request sense command. In many instances this value is not aligned with
+the device sector size, causing sd_done() to print a warning message about
+the incorrect unaligned resid before the command is retried.
+
+Fix this problem by saving the original command residual in struct
+scsi_eh_save using scsi_eh_prep_cmnd() and restoring it in
+scsi_eh_restore_cmnd(). In addition, to make sure that the request sense
+command is executed with a correctly initialized command structure, also
+reset the residual to 0 in scsi_eh_prep_cmnd() after saving the original
+command value in struct scsi_eh_save.
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191001074839.1994-1-damien.lemoal@wdc.com
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/scsi_error.c |    3 +++
+ include/scsi/scsi_eh.h    |    1 +
+ 2 files changed, 4 insertions(+)
+
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -935,6 +935,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd
+       ses->sdb = scmd->sdb;
+       ses->next_rq = scmd->request->next_rq;
+       ses->result = scmd->result;
++      ses->resid_len = scmd->req.resid_len;
+       ses->underflow = scmd->underflow;
+       ses->prot_op = scmd->prot_op;
+       ses->eh_eflags = scmd->eh_eflags;
+@@ -946,6 +947,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd
+       memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+       scmd->request->next_rq = NULL;
+       scmd->result = 0;
++      scmd->req.resid_len = 0;
+       if (sense_bytes) {
+               scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
+@@ -999,6 +1001,7 @@ void scsi_eh_restore_cmnd(struct scsi_cm
+       scmd->sdb = ses->sdb;
+       scmd->request->next_rq = ses->next_rq;
+       scmd->result = ses->result;
++      scmd->req.resid_len = ses->resid_len;
+       scmd->underflow = ses->underflow;
+       scmd->prot_op = ses->prot_op;
+       scmd->eh_eflags = ses->eh_eflags;
+--- a/include/scsi/scsi_eh.h
++++ b/include/scsi/scsi_eh.h
+@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_
+ struct scsi_eh_save {
+       /* saved state */
+       int result;
++      unsigned int resid_len;
+       int eh_eflags;
+       enum dma_data_direction data_direction;
+       unsigned underflow;
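
The round-trip the patch extends to the residual, as a three-step
sketch (the request sense command reuses scmd, so any field it can
clobber has to pass through struct scsi_eh_save):

    ses->resid_len = scmd->req.resid_len;  /* 1. save the original value */
    scmd->req.resid_len = 0;               /* 2. clean slate for sense   */
    /* ... request sense executes, overwriting scmd state ... */
    scmd->req.resid_len = ses->resid_len;  /* 3. restore before retry    */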
diff --git a/queue-4.14/scsi-core-try-to-get-module-before-removing-device.patch b/queue-4.14/scsi-core-try-to-get-module-before-removing-device.patch
new file mode 100644
index 0000000..6ada38d
--- /dev/null
@@ -0,0 +1,94 @@
+From 77c301287ebae86cc71d03eb3806f271cb14da79 Mon Sep 17 00:00:00 2001
+From: Yufen Yu <yuyufen@huawei.com>
+Date: Tue, 15 Oct 2019 21:05:56 +0800
+Subject: scsi: core: try to get module before removing device
+
+From: Yufen Yu <yuyufen@huawei.com>
+
+commit 77c301287ebae86cc71d03eb3806f271cb14da79 upstream.
+
+We have a test case like block/001 in blktests, which creates a scsi
+device by loading the scsi_debug module and then tries to delete the
+device through the sysfs interface. At the same time, it may remove the
+scsi_debug module, which leads to an invalid paging request BUG as
+follows:
+
+[   34.625854] BUG: unable to handle page fault for address: ffffffffa0016bb8
+[   34.629189] Oops: 0000 [#1] SMP PTI
+[   34.629618] CPU: 1 PID: 450 Comm: bash Tainted: G        W         5.4.0-rc3+ #473
+[   34.632524] RIP: 0010:scsi_proc_hostdir_rm+0x5/0xa0
+[   34.643555] CR2: ffffffffa0016bb8 CR3: 000000012cd88000 CR4: 00000000000006e0
+[   34.644545] Call Trace:
+[   34.644907]  scsi_host_dev_release+0x6b/0x1f0
+[   34.645511]  device_release+0x74/0x110
+[   34.646046]  kobject_put+0x116/0x390
+[   34.646559]  put_device+0x17/0x30
+[   34.647041]  scsi_target_dev_release+0x2b/0x40
+[   34.647652]  device_release+0x74/0x110
+[   34.648186]  kobject_put+0x116/0x390
+[   34.648691]  put_device+0x17/0x30
+[   34.649157]  scsi_device_dev_release_usercontext+0x2e8/0x360
+[   34.649953]  execute_in_process_context+0x29/0x80
+[   34.650603]  scsi_device_dev_release+0x20/0x30
+[   34.651221]  device_release+0x74/0x110
+[   34.651732]  kobject_put+0x116/0x390
+[   34.652230]  sysfs_unbreak_active_protection+0x3f/0x50
+[   34.652935]  sdev_store_delete.cold.4+0x71/0x8f
+[   34.653579]  dev_attr_store+0x1b/0x40
+[   34.654103]  sysfs_kf_write+0x3d/0x60
+[   34.654603]  kernfs_fop_write+0x174/0x250
+[   34.655165]  __vfs_write+0x1f/0x60
+[   34.655639]  vfs_write+0xc7/0x280
+[   34.656117]  ksys_write+0x6d/0x140
+[   34.656591]  __x64_sys_write+0x1e/0x30
+[   34.657114]  do_syscall_64+0xb1/0x400
+[   34.657627]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[   34.658335] RIP: 0033:0x7f156f337130
+
+While the scsi target is being deleted, the scsi_debug module may
+already have been removed. Then sdebug_driver_template, which belongs
+to the module, can no longer be accessed, resulting in the BUG in
+scsi_proc_hostdir_rm().
+
+To fix the bug, add scsi_device_get() in sdev_store_delete() to take a
+reference on the module, preventing it from being removed while the
+device is deleted.
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191015130556.18061-1-yuyufen@huawei.com
+Signed-off-by: Yufen Yu <yuyufen@huawei.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/scsi_sysfs.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -722,6 +722,14 @@ sdev_store_delete(struct device *dev, st
+                 const char *buf, size_t count)
+ {
+       struct kernfs_node *kn;
++      struct scsi_device *sdev = to_scsi_device(dev);
++
++      /*
++       * Pin the module by taking a reference, so that it cannot be
++       * removed while the device is being deleted.
++       */
++      if (scsi_device_get(sdev))
++              return -ENODEV;
+       kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+       WARN_ON_ONCE(!kn);
+@@ -736,9 +744,10 @@ sdev_store_delete(struct device *dev, st
+        * state into SDEV_DEL.
+        */
+       device_remove_file(dev, attr);
+-      scsi_remove_device(to_scsi_device(dev));
++      scsi_remove_device(sdev);
+       if (kn)
+               sysfs_unbreak_active_protection(kn);
++      scsi_device_put(sdev);
+       return count;
+ };
+ static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
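
The pattern, reduced to its essentials (scsi_device_get() pins the
host template's module via try_module_get() in addition to taking a
device reference, which is what keeps scsi_debug loaded here):

    if (scsi_device_get(sdev))
            return -ENODEV;        /* device already on its way out */

    /* ... break active protection and remove the device; the LLD
     * module cannot be unloaded anywhere in this window ... */
    scsi_remove_device(sdev);

    scsi_device_put(sdev);         /* drop device and module references */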
diff --git a/queue-4.14/scsi-sd-ignore-a-failure-to-sync-cache-due-to-lack-of-authorization.patch b/queue-4.14/scsi-sd-ignore-a-failure-to-sync-cache-due-to-lack-of-authorization.patch
new file mode 100644
index 0000000..00a70d9
--- /dev/null
@@ -0,0 +1,38 @@
+From 21e3d6c81179bbdfa279efc8de456c34b814cfd2 Mon Sep 17 00:00:00 2001
+From: Oliver Neukum <oneukum@suse.com>
+Date: Tue, 3 Sep 2019 12:18:39 +0200
+Subject: scsi: sd: Ignore a failure to sync cache due to lack of authorization
+
+From: Oliver Neukum <oneukum@suse.com>
+
+commit 21e3d6c81179bbdfa279efc8de456c34b814cfd2 upstream.
+
+I've got a report about a UAS drive enclosure reporting back Sense: Logical
+unit access not authorized if the drive it holds is password protected.
+While the drive is obviously unusable in that state as a mass storage
+device, it still exists as an sd device, and when the system is asked to
+perform a suspend of the drive, it will be sent a SYNCHRONIZE CACHE. If
+that fails due to password protection, the error must be ignored.
+
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20190903101840.16483-1-oneukum@suse.com
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sd.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1658,7 +1658,8 @@ static int sd_sync_cache(struct scsi_dis
+               /* we need to evaluate the error return  */
+               if (scsi_sense_valid(sshdr) &&
+                       (sshdr->asc == 0x3a ||  /* medium not present */
+-                       sshdr->asc == 0x20))   /* invalid command */
++                       sshdr->asc == 0x20 ||  /* invalid command */
++                       (sshdr->asc == 0x74 && sshdr->ascq == 0x71)))  /* drive is password locked */
+                               /* this is no error here */
+                               return 0;
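
The whitelist logic, spelled out (asc/ascq values as used in the
patch; 0x74/0x71 is the "logical unit access not authorized" pair from
the report):

    if (scsi_sense_valid(sshdr) &&
        (sshdr->asc == 0x3a ||                         /* medium not present */
         sshdr->asc == 0x20 ||                         /* invalid command    */
         (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* access locked      */
            return 0;  /* not an error: a cache sync is moot in these states */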
index 24d35d95bba082b9ff92e1a803c18d1b23c9040c..895538ef3f6ca33f8f04c78ae1ac2183f651b6e2 100644
@@ -34,3 +34,66 @@ usb-serial-ti_usb_3410_5052-fix-port-close-races.patch
 usb-ldusb-fix-memleak-on-disconnect.patch
 usb-usblp-fix-use-after-free-on-disconnect.patch
 usb-ldusb-fix-read-info-leaks.patch
+arm64-sysreg-move-to-use-definitions-for-all-the-sctlr-bits.patch
+arm64-expose-support-for-optional-armv8-a-features.patch
+arm64-fix-the-feature-type-for-id-register-fields.patch
+arm64-v8.4-support-for-new-floating-point-multiplication-instructions.patch
+arm64-documentation-cpu-feature-registers-remove-res0-fields.patch
+arm64-expose-arm-v8.4-features.patch
+arm64-move-sctlr_el-1-2-assertions-to-asm-sysreg.h.patch
+arm64-add-psr_aa32_-definitions.patch
+arm64-introduce-sysreg_clear_set.patch
+arm64-capabilities-update-prototype-for-enable-call-back.patch
+arm64-capabilities-move-errata-work-around-check-on-boot-cpu.patch
+arm64-capabilities-move-errata-processing-code.patch
+arm64-capabilities-prepare-for-fine-grained-capabilities.patch
+arm64-capabilities-add-flags-to-handle-the-conflicts-on-late-cpu.patch
+arm64-capabilities-unify-the-verification.patch
+arm64-capabilities-filter-the-entries-based-on-a-given-mask.patch
+arm64-capabilities-prepare-for-grouping-features-and-errata-work-arounds.patch
+arm64-capabilities-split-the-processing-of-errata-work-arounds.patch
+arm64-capabilities-allow-features-based-on-local-cpu-scope.patch
+arm64-capabilities-group-handling-of-features-and-errata-workarounds.patch
+arm64-capabilities-introduce-weak-features-based-on-local-cpu.patch
+arm64-capabilities-restrict-kpti-detection-to-boot-time-cpus.patch
+arm64-capabilities-add-support-for-features-enabled-early.patch
+arm64-capabilities-change-scope-of-vhe-to-boot-cpu-feature.patch
+arm64-capabilities-clean-up-midr-range-helpers.patch
+arm64-add-helpers-for-checking-cpu-midr-against-a-range.patch
+arm64-add-midr-encoding-for-arm-cortex-a55-and-cortex-a35.patch
+arm64-capabilities-add-support-for-checks-based-on-a-list-of-midrs.patch
+arm64-kvm-use-smccc_arch_workaround_1-for-falkor-bp-hardening.patch
+arm64-don-t-zero-dit-on-signal-return.patch
+arm64-get-rid-of-__smccc_workaround_1_hvc_.patch
+arm64-cpufeature-detect-ssbs-and-advertise-to-userspace.patch
+arm64-ssbd-add-support-for-pstate.ssbs-rather-than-trapping-to-el3.patch
+kvm-arm64-set-sctlr_el2.dssbs-if-ssbd-is-forcefully-disabled-and-vhe.patch
+arm64-fix-ssbs-sanitization.patch
+arm64-add-sysfs-vulnerability-show-for-spectre-v1.patch
+arm64-add-sysfs-vulnerability-show-for-meltdown.patch
+arm64-enable-generic-cpu-vulnerabilites-support.patch
+arm64-always-enable-ssb-vulnerability-detection.patch
+arm64-provide-a-command-line-to-disable-spectre_v2-mitigation.patch
+arm64-advertise-mitigation-of-spectre-v2-or-lack-thereof.patch
+arm64-always-enable-spectre-v2-vulnerability-detection.patch
+arm64-add-sysfs-vulnerability-show-for-spectre-v2.patch
+arm64-add-sysfs-vulnerability-show-for-speculative-store-bypass.patch
+arm64-ssbs-don-t-treat-cpus-with-ssbs-as-unaffected-by-ssb.patch
+arm64-force-ssbs-on-context-switch.patch
+arm64-use-firmware-to-detect-cpus-that-are-not-affected-by-spectre-v2.patch
+arm64-speculation-support-mitigations-cmdline-option.patch
+mips-tlbex-fix-build_restore_pagemask-kscratch-restore.patch
+staging-wlan-ng-fix-exit-return-when-sme-key_idx-num_wepkeys.patch
+scsi-sd-ignore-a-failure-to-sync-cache-due-to-lack-of-authorization.patch
+scsi-core-save-restore-command-resid-for-error-handling.patch
+scsi-core-try-to-get-module-before-removing-device.patch
+scsi-ch-make-it-possible-to-open-a-ch-device-multiple-times-again.patch
+input-da9063-fix-capability-and-drop-key_sleep.patch
+input-synaptics-rmi4-avoid-processing-unknown-irqs.patch
+asoc-rsnd-reinitialize-bit-clock-inversion-flag-for-every-format-setting.patch
+cfg80211-wext-avoid-copying-malformed-ssids.patch
+mac80211-reject-malformed-ssid-elements.patch
+drm-edid-add-6-bpc-quirk-for-sdc-panel-in-lenovo-g50.patch
+drm-amdgpu-bail-earlier-when-amdgpu.cik_-si_support-is-not-set-to-1.patch
+drivers-base-memory.c-don-t-access-uninitialized-memmaps-in-soft_offline_page_store.patch
+fs-proc-page.c-don-t-access-uninitialized-memmaps-in-fs-proc-page.c.patch
diff --git a/queue-4.14/staging-wlan-ng-fix-exit-return-when-sme-key_idx-num_wepkeys.patch b/queue-4.14/staging-wlan-ng-fix-exit-return-when-sme-key_idx-num_wepkeys.patch
new file mode 100644
index 0000000..1364781
--- /dev/null
@@ -0,0 +1,40 @@
+From 153c5d8191c26165dbbd2646448ca7207f7796d0 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Mon, 14 Oct 2019 12:02:01 +0100
+Subject: staging: wlan-ng: fix exit return when sme->key_idx >= NUM_WEPKEYS
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit 153c5d8191c26165dbbd2646448ca7207f7796d0 upstream.
+
+Currently the exit return path when sme->key_idx >= NUM_WEPKEYS is via
+the label 'exit', which checks whether result is non-zero; however,
+result has not been initialized and contains garbage. Fix this by
+replacing the goto with a direct return of the error code.
+
+Addresses-Coverity: ("Uninitialized scalar variable")
+Fixes: 0ca6d8e74489 ("Staging: wlan-ng: replace switch-case statements with macro")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Cc: stable <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191014110201.9874-1-colin.king@canonical.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/wlan-ng/cfg80211.c |    6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/staging/wlan-ng/cfg80211.c
++++ b/drivers/staging/wlan-ng/cfg80211.c
+@@ -490,10 +490,8 @@ static int prism2_connect(struct wiphy *
+       /* Set the encryption - we only support wep */
+       if (is_wep) {
+               if (sme->key) {
+-                      if (sme->key_idx >= NUM_WEPKEYS) {
+-                              err = -EINVAL;
+-                              goto exit;
+-                      }
++                      if (sme->key_idx >= NUM_WEPKEYS)
++                              return -EINVAL;
+                       result = prism2_domibset_uint32(wlandev,
+                               DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,