--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+Date: Mon, 24 Jan 2022 08:45:37 +0530
+Subject: arm64: Add Cortex-X2 CPU part definition
+
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+
+commit 72bb9dcb6c33cfac80282713c2b4f2b254cd24d1 upstream.
+
+Add the CPU part numbers for the new Arm designs.
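+
+A part number like this is typically consumed through the MIDR helpers in
+<asm/cputype.h>; a sketch follows (the affected-CPU list and the helper
+name are purely hypothetical, for illustration only):
+
+  static const struct midr_range hypothetical_affected_cpus[] = {
+          MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+          {},
+  };
+
+  /* hypothetical helper: true if the current CPU is in the list above */
+  static bool cpu_is_affected(void)
+  {
+          return is_midr_in_range_list(read_cpuid_id(),
+                                       hypothetical_affected_cpus);
+  }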
+
+Cc: Will Deacon <will@kernel.org>
+Cc: Suzuki Poulose <suzuki.poulose@arm.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Link: https://lore.kernel.org/r/1642994138-25887-2-git-send-email-anshuman.khandual@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cputype.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -75,6 +75,7 @@
+ #define ARM_CPU_PART_CORTEX_A77 0xD0D
+ #define ARM_CPU_PART_CORTEX_A510 0xD46
+ #define ARM_CPU_PART_CORTEX_A710 0xD47
++#define ARM_CPU_PART_CORTEX_X2 0xD48
+ #define ARM_CPU_PART_NEOVERSE_N2 0xD49
+
+ #define APM_CPU_PART_POTENZA 0x000
+@@ -118,6 +119,7 @@
+ #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+ #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+ #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
++#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+ #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: Marc Zyngier <maz@kernel.org>
+Date: Sun, 17 Oct 2021 13:42:25 +0100
+Subject: arm64: Add HWCAP for self-synchronising virtual counter
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit fee29f008aa3f2aff01117f28b57b1145d92cb9b upstream.
+
+Since userspace can make use of the CNTVCTSS_EL0 register, expose
+it via a HWCAP.
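+
+A minimal userspace sketch of consuming the new capability bit via
+getauxval() (the fallback define only covers builds against older uapi
+headers; the value matches the one added below):
+
+  #include <stdio.h>
+  #include <sys/auxv.h>
+
+  #ifndef HWCAP2_ECV
+  #define HWCAP2_ECV (1 << 19)
+  #endif
+
+  int main(void)
+  {
+          unsigned long hwcap2 = getauxval(AT_HWCAP2);
+
+          printf("ecv: %s\n", (hwcap2 & HWCAP2_ECV) ? "present" : "absent");
+          return 0;
+  }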
+
+Suggested-by: Will Deacon <will@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20211017124225.3018098-18-maz@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/cpu-feature-registers.rst | 12 ++++++++++--
+ Documentation/arm64/elf_hwcaps.rst | 4 ++++
+ arch/arm64/include/asm/hwcap.h | 1 +
+ arch/arm64/include/uapi/asm/hwcap.h | 1 +
+ arch/arm64/kernel/cpufeature.c | 3 ++-
+ arch/arm64/kernel/cpuinfo.c | 1 +
+ 6 files changed, 19 insertions(+), 3 deletions(-)
+
+--- a/Documentation/arm64/cpu-feature-registers.rst
++++ b/Documentation/arm64/cpu-feature-registers.rst
+@@ -235,7 +235,15 @@ infrastructure:
+ | DPB | [3-0] | y |
+ +------------------------------+---------+---------+
+
+- 6) ID_AA64MMFR2_EL1 - Memory model feature register 2
++ 6) ID_AA64MMFR0_EL1 - Memory model feature register 0
++
++ +------------------------------+---------+---------+
++ | Name | bits | visible |
++ +------------------------------+---------+---------+
++ | ECV | [63-60] | y |
++ +------------------------------+---------+---------+
++
++ 7) ID_AA64MMFR2_EL1 - Memory model feature register 2
+
+ +------------------------------+---------+---------+
+ | Name | bits | visible |
+@@ -243,7 +251,7 @@ infrastructure:
+ | AT | [35-32] | y |
+ +------------------------------+---------+---------+
+
+- 7) ID_AA64ZFR0_EL1 - SVE feature ID register 0
++ 8) ID_AA64ZFR0_EL1 - SVE feature ID register 0
+
+ +------------------------------+---------+---------+
+ | Name | bits | visible |
+--- a/Documentation/arm64/elf_hwcaps.rst
++++ b/Documentation/arm64/elf_hwcaps.rst
+@@ -247,6 +247,10 @@ HWCAP2_MTE
+ Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0010, as described
+ by Documentation/arm64/memory-tagging-extension.rst.
+
++HWCAP2_ECV
++
++ Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001.
++
+ 4. Unused AT_HWCAP bits
+ -----------------------
+
+--- a/arch/arm64/include/asm/hwcap.h
++++ b/arch/arm64/include/asm/hwcap.h
+@@ -105,6 +105,7 @@
+ #define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
+ #define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
+ #define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)
++#define KERNEL_HWCAP_ECV __khwcap2_feature(ECV)
+
+ /*
+ * This yields a mask that user programs can use to figure out what
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -75,5 +75,6 @@
+ #define HWCAP2_RNG (1 << 16)
+ #define HWCAP2_BTI (1 << 17)
+ #define HWCAP2_MTE (1 << 18)
++#define HWCAP2_ECV (1 << 19)
+
+ #endif /* _UAPI__ASM_HWCAP_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -279,7 +279,7 @@ static const struct arm64_ftr_bits ftr_i
+ };
+
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
+ /*
+@@ -2455,6 +2455,7 @@ static const struct arm64_cpu_capabiliti
+ #ifdef CONFIG_ARM64_MTE
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
+ #endif /* CONFIG_ARM64_MTE */
++ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
+ {},
+ };
+
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -94,6 +94,7 @@ static const char *const hwcap_str[] = {
+ [KERNEL_HWCAP_RNG] = "rng",
+ [KERNEL_HWCAP_BTI] = "bti",
+ [KERNEL_HWCAP_MTE] = "mte",
++ [KERNEL_HWCAP_ECV] = "ecv",
+ };
+
+ #ifdef CONFIG_COMPAT
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: Joey Gouly <joey.gouly@arm.com>
+Date: Fri, 10 Dec 2021 16:54:31 +0000
+Subject: arm64: add ID_AA64ISAR2_EL1 sys register
+
+From: Joey Gouly <joey.gouly@arm.com>
+
+commit 9e45365f1469ef2b934f9d035975dbc9ad352116 upstream.
+
+This is a new ID register, introduced in Armv8.7.
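+
+A sketch of how userspace can observe the sanitised copy of the new
+register through the existing MRS emulation described in
+Documentation/arm64/cpu-feature-registers.rst (fields not exposed to
+userspace read as zero):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned long isar2;
+
+          /* ID_AA64ISAR2_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=6, op2=2 */
+          asm("mrs %0, S3_0_C0_C6_2" : "=r" (isar2));
+
+          printf("ID_AA64ISAR2_EL1: 0x%016lx\n", isar2);
+          return 0;
+  }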
+
+Signed-off-by: Joey Gouly <joey.gouly@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: James Morse <james.morse@arm.com>
+Cc: Alexandru Elisei <alexandru.elisei@arm.com>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: Reiji Watanabe <reijiw@google.com>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20211210165432.8106-3-joey.gouly@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cpu.h | 1 +
+ arch/arm64/include/asm/sysreg.h | 15 +++++++++++++++
+ arch/arm64/kernel/cpufeature.c | 9 +++++++++
+ arch/arm64/kernel/cpuinfo.c | 1 +
+ arch/arm64/kvm/sys_regs.c | 2 +-
+ 5 files changed, 27 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/cpu.h
++++ b/arch/arm64/include/asm/cpu.h
+@@ -51,6 +51,7 @@ struct cpuinfo_arm64 {
+ u64 reg_id_aa64dfr1;
+ u64 reg_id_aa64isar0;
+ u64 reg_id_aa64isar1;
++ u64 reg_id_aa64isar2;
+ u64 reg_id_aa64mmfr0;
+ u64 reg_id_aa64mmfr1;
+ u64 reg_id_aa64mmfr2;
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -180,6 +180,7 @@
+
+ #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
+ #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
++#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2)
+
+ #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
+ #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
+@@ -764,6 +765,20 @@
+ #define ID_AA64ISAR1_GPI_NI 0x0
+ #define ID_AA64ISAR1_GPI_IMP_DEF 0x1
+
++/* id_aa64isar2 */
++#define ID_AA64ISAR2_RPRES_SHIFT 4
++#define ID_AA64ISAR2_WFXT_SHIFT 0
++
++#define ID_AA64ISAR2_RPRES_8BIT 0x0
++#define ID_AA64ISAR2_RPRES_12BIT 0x1
++/*
++ * Value 0x1 has been removed from the architecture, and is
++ * reserved, but has not yet been removed from the ARM ARM
++ * as of ARM DDI 0487G.b.
++ */
++#define ID_AA64ISAR2_WFXT_NI 0x0
++#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2
++
+ /* id_aa64pfr0 */
+ #define ID_AA64PFR0_CSV3_SHIFT 60
+ #define ID_AA64PFR0_CSV2_SHIFT 56
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -225,6 +225,10 @@ static const struct arm64_ftr_bits ftr_i
+ ARM64_FTR_END,
+ };
+
++static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
++ ARM64_FTR_END,
++};
++
+ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
+@@ -637,6 +641,7 @@ static const struct __ftr_reg_entry {
+ ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
+ &id_aa64isar1_override),
++ ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
+
+ /* Op1 = 0, CRn = 0, CRm = 7 */
+ ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
+@@ -933,6 +938,7 @@ void __init init_cpu_features(struct cpu
+ init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
+ init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
++ init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
+ init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
+ init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
+@@ -1151,6 +1157,8 @@ void update_cpu_features(int cpu,
+ info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
+ info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
++ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
++ info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
+
+ /*
+ * Differing PARange support is fine as long as all peripherals and
+@@ -1272,6 +1280,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id
+ read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
++ read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
+
+ read_sysreg_case(SYS_CNTFRQ_EL0);
+ read_sysreg_case(SYS_CTR_EL0);
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -391,6 +391,7 @@ static void __cpuinfo_store_cpu(struct c
+ info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
+ info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
+ info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
++ info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
+ info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+ info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1518,7 +1518,7 @@ static const struct sys_reg_desc sys_reg
+ /* CRm=6 */
+ ID_SANITISED(ID_AA64ISAR0_EL1),
+ ID_SANITISED(ID_AA64ISAR1_EL1),
+- ID_UNALLOCATED(6,2),
++ ID_SANITISED(ID_AA64ISAR2_EL1),
+ ID_UNALLOCATED(6,3),
+ ID_UNALLOCATED(6,4),
+ ID_UNALLOCATED(6,5),
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 19 Oct 2021 17:31:39 +0100
+Subject: arm64: Add Neoverse-N2, Cortex-A710 CPU part definition
+
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+
+commit 2d0d656700d67239a57afaf617439143d8dac9be upstream.
+
+Add the CPU part numbers for the new Arm designs.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Link: https://lore.kernel.org/r/20211019163153.3692640-2-suzuki.poulose@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cputype.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -74,6 +74,8 @@
+ #define ARM_CPU_PART_NEOVERSE_N1 0xD0C
+ #define ARM_CPU_PART_CORTEX_A77 0xD0D
+ #define ARM_CPU_PART_CORTEX_A510 0xD46
++#define ARM_CPU_PART_CORTEX_A710 0xD47
++#define ARM_CPU_PART_NEOVERSE_N2 0xD49
+
+ #define APM_CPU_PART_POTENZA 0x000
+
+@@ -115,6 +117,8 @@
+ #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+ #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+ #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
++#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
++#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Tue, 23 Nov 2021 18:29:25 +0000
+Subject: arm64: Add percpu vectors for EL1
+
+From: James Morse <james.morse@arm.com>
+
+commit bd09128d16fac3c34b80bd6a29088ac632e8ce09 upstream.
+
+The Spectre-BHB workaround adds a firmware call to the vectors. This
+is needed on some CPUs, but not others. To prevent the unaffected CPUs
+in a big/little pair from making the firmware call, create per-cpu vectors.
+
+The per-cpu vectors only apply when returning from EL0.
+
+Systems using KPTI can use the canonical 'full-fat' vectors directly at
+EL1; the trampoline exit code will switch to this_cpu_vector on exit to
+EL0. Systems not using KPTI should always use this_cpu_vector.
+
+this_cpu_vector will point at a vector in tramp_vecs or
+__bp_harden_el1_vectors, depending on whether KPTI is in use.
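+
+A sketch of how a CPU can be moved onto one of these vector slots at
+runtime using the helpers added here (the function name is illustrative,
+not something this patch adds):
+
+  static void this_cpu_switch_vector(enum arm64_bp_harden_el1_vectors slot)
+  {
+          const char *v = arm64_get_bp_hardening_vector(slot);
+
+          __this_cpu_write(this_cpu_vector, v);
+
+          /*
+           * With KPTI the trampoline installs this_cpu_vector on the next
+           * exit to EL0; without KPTI install the new vector immediately.
+           */
+          if (!arm64_kernel_unmapped_at_el0()) {
+                  write_sysreg(v, vbar_el1);
+                  isb();
+          }
+  }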
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/vectors.h | 29 ++++++++++++++++++++++++++++-
+ arch/arm64/kernel/cpufeature.c | 11 +++++++++++
+ arch/arm64/kernel/entry.S | 12 ++++++------
+ arch/arm64/kvm/hyp/vhe/switch.c | 9 +++++++--
+ 4 files changed, 52 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/include/asm/vectors.h
++++ b/arch/arm64/include/asm/vectors.h
+@@ -5,6 +5,15 @@
+ #ifndef __ASM_VECTORS_H
+ #define __ASM_VECTORS_H
+
++#include <linux/bug.h>
++#include <linux/percpu.h>
++
++#include <asm/fixmap.h>
++
++extern char vectors[];
++extern char tramp_vectors[];
++extern char __bp_harden_el1_vectors[];
++
+ /*
+ * Note: the order of this enum corresponds to two arrays in entry.S:
+ * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
+@@ -29,6 +38,24 @@ enum arm64_bp_harden_el1_vectors {
+ * Remap the kernel before branching to the canonical vectors.
+ */
+ EL1_VECTOR_KPTI,
+ };
++
++/* The vectors to use on return from EL0. e.g. to remap the kernel */
++DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
++
++#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
++#define TRAMP_VALIAS 0
++#endif
++
++static inline const char *
++arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
++{
++ if (arm64_kernel_unmapped_at_el0())
++ return (char *)TRAMP_VALIAS + SZ_2K * slot;
++
++ WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
++
++ return __bp_harden_el1_vectors + SZ_2K * slot;
++}
+
+ #endif /* __ASM_VECTORS_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -73,6 +73,8 @@
+ #include <linux/mm.h>
+ #include <linux/cpu.h>
+ #include <linux/kasan.h>
++#include <linux/percpu.h>
++
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+ #include <asm/cpu_ops.h>
+@@ -85,6 +87,7 @@
+ #include <asm/smp.h>
+ #include <asm/sysreg.h>
+ #include <asm/traps.h>
++#include <asm/vectors.h>
+ #include <asm/virt.h>
+
+ /* Kernel representation of AT_HWCAP and AT_HWCAP2 */
+@@ -110,6 +113,8 @@ DECLARE_BITMAP(boot_capabilities, ARM64_
+ bool arm64_use_ng_mappings = false;
+ EXPORT_SYMBOL(arm64_use_ng_mappings);
+
++DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
++
+ /*
+ * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
+ * support it?
+@@ -1590,6 +1595,12 @@ kpti_install_ng_mappings(const struct ar
+
+ int cpu = smp_processor_id();
+
++ if (__this_cpu_read(this_cpu_vector) == vectors) {
++ const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
++
++ __this_cpu_write(this_cpu_vector, v);
++ }
++
+ /*
+ * We don't need to rewrite the page-tables if either we've done
+ * it already or we have KASLR enabled and therefore have not
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -38,7 +38,6 @@
+ .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
+ .align 7
+ .Lventry_start\@:
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ .if \el == 0
+ /*
+ * This must be the first instruction of the EL0 vector entries. It is
+@@ -53,7 +52,6 @@
+ .endif
+ .Lskip_tramp_vectors_cleanup\@:
+ .endif
+-#endif
+
+ sub sp, sp, #PT_REGS_SIZE
+ #ifdef CONFIG_VMAP_STACK
+@@ -712,10 +710,10 @@ alternative_else_nop_endif
+ .endm
+
+ .macro tramp_exit, regsize = 64
+- adr x30, tramp_vectors
+-#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+- add x30, x30, SZ_4K
+-#endif
++ tramp_data_read_var x30, this_cpu_vector
++ get_this_cpu_offset x29
++ ldr x30, [x30, x29]
++
+ msr vbar_el1, x30
+ ldr lr, [sp, #S_LR]
+ tramp_unmap_kernel x29
+@@ -775,6 +773,8 @@ __entry_tramp_data_vectors:
+ __entry_tramp_data___sdei_asm_handler:
+ .quad __sdei_asm_handler
+ #endif /* CONFIG_ARM_SDE_INTERFACE */
++__entry_tramp_data_this_cpu_vector:
++ .quad this_cpu_vector
+ SYM_DATA_END(__entry_tramp_data_start)
+ .popsection // .rodata
+ #endif /* CONFIG_RANDOMIZE_BASE */
+--- a/arch/arm64/kvm/hyp/vhe/switch.c
++++ b/arch/arm64/kvm/hyp/vhe/switch.c
+@@ -10,6 +10,7 @@
+ #include <linux/kvm_host.h>
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
++#include <linux/percpu.h>
+ #include <uapi/linux/psci.h>
+
+ #include <kvm/arm_psci.h>
+@@ -25,6 +26,7 @@
+ #include <asm/debug-monitors.h>
+ #include <asm/processor.h>
+ #include <asm/thread_info.h>
++#include <asm/vectors.h>
+
+ /* VHE specific context */
+ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
+@@ -68,7 +70,7 @@ NOKPROBE_SYMBOL(__activate_traps);
+
+ static void __deactivate_traps(struct kvm_vcpu *vcpu)
+ {
+- extern char vectors[]; /* kernel exception vectors */
++ const char *host_vectors = vectors;
+
+ ___deactivate_traps(vcpu);
+
+@@ -82,7 +84,10 @@ static void __deactivate_traps(struct kv
+ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
+
+ write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
+- write_sysreg(vectors, vbar_el1);
++
++ if (!arm64_kernel_unmapped_at_el0())
++ host_vectors = __this_cpu_read(this_cpu_vector);
++ write_sysreg(host_vectors, vbar_el1);
+ }
+ NOKPROBE_SYMBOL(__deactivate_traps);
+
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: Joey Gouly <joey.gouly@arm.com>
+Date: Fri, 10 Dec 2021 16:54:30 +0000
+Subject: arm64: cpufeature: add HWCAP for FEAT_AFP
+
+From: Joey Gouly <joey.gouly@arm.com>
+
+commit 5c13f042e73200b50573ace63e1a6b94e2917616 upstream.
+
+Add a new HWCAP to detect the Alternate Floating-point Behaviour
+feature (FEAT_AFP), introduced in Armv8.7.
+
+Also expose this to userspace in the ID_AA64MMFR1_EL1 feature register.
+
+Signed-off-by: Joey Gouly <joey.gouly@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20211210165432.8106-2-joey.gouly@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/cpu-feature-registers.rst | 9 +++++++++
+ Documentation/arm64/elf_hwcaps.rst | 4 ++++
+ arch/arm64/include/asm/hwcap.h | 1 +
+ arch/arm64/include/asm/sysreg.h | 1 +
+ arch/arm64/include/uapi/asm/hwcap.h | 1 +
+ arch/arm64/kernel/cpufeature.c | 2 ++
+ arch/arm64/kernel/cpuinfo.c | 1 +
+ 7 files changed, 19 insertions(+)
+
+--- a/Documentation/arm64/cpu-feature-registers.rst
++++ b/Documentation/arm64/cpu-feature-registers.rst
+@@ -275,6 +275,15 @@ infrastructure:
+ | SVEVer | [3-0] | y |
+ +------------------------------+---------+---------+
+
++ 8) ID_AA64MMFR1_EL1 - Memory model feature register 1
++
++ +------------------------------+---------+---------+
++ | Name | bits | visible |
++ +------------------------------+---------+---------+
++ | AFP | [47-44] | y |
++ +------------------------------+---------+---------+
++
++
+ Appendix I: Example
+ -------------------
+
+--- a/Documentation/arm64/elf_hwcaps.rst
++++ b/Documentation/arm64/elf_hwcaps.rst
+@@ -251,6 +251,10 @@ HWCAP2_ECV
+
+ Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001.
+
++HWCAP2_AFP
++
++ Functionality implied by ID_AA64MMFR1_EL1.AFP == 0b0001.
++
+ 4. Unused AT_HWCAP bits
+ -----------------------
+
+--- a/arch/arm64/include/asm/hwcap.h
++++ b/arch/arm64/include/asm/hwcap.h
+@@ -106,6 +106,7 @@
+ #define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
+ #define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)
+ #define KERNEL_HWCAP_ECV __khwcap2_feature(ECV)
++#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP)
+
+ /*
+ * This yields a mask that user programs can use to figure out what
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -896,6 +896,7 @@
+ #endif
+
+ /* id_aa64mmfr1 */
++#define ID_AA64MMFR1_AFP_SHIFT 44
+ #define ID_AA64MMFR1_ETS_SHIFT 36
+ #define ID_AA64MMFR1_TWED_SHIFT 32
+ #define ID_AA64MMFR1_XNX_SHIFT 28
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -76,5 +76,6 @@
+ #define HWCAP2_BTI (1 << 17)
+ #define HWCAP2_MTE (1 << 18)
+ #define HWCAP2_ECV (1 << 19)
++#define HWCAP2_AFP (1 << 20)
+
+ #endif /* _UAPI__ASM_HWCAP_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -329,6 +329,7 @@ static const struct arm64_ftr_bits ftr_i
+ };
+
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
+@@ -2465,6 +2466,7 @@ static const struct arm64_cpu_capabiliti
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
+ #endif /* CONFIG_ARM64_MTE */
+ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
++ HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
+ {},
+ };
+
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -95,6 +95,7 @@ static const char *const hwcap_str[] = {
+ [KERNEL_HWCAP_BTI] = "bti",
+ [KERNEL_HWCAP_MTE] = "mte",
+ [KERNEL_HWCAP_ECV] = "ecv",
++ [KERNEL_HWCAP_AFP] = "afp",
+ };
+
+ #ifdef CONFIG_COMPAT
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: Joey Gouly <joey.gouly@arm.com>
+Date: Fri, 10 Dec 2021 16:54:32 +0000
+Subject: arm64: cpufeature: add HWCAP for FEAT_RPRES
+
+From: Joey Gouly <joey.gouly@arm.com>
+
+commit 1175011a7d0030d49dc9c10bde36f08f26d0a8ee upstream.
+
+Add a new HWCAP to detect the Increased precision of Reciprocal Estimate
+and Reciprocal Square Root Estimate feature (FEAT_RPRES), introduced in Armv8.7.
+
+Also expose this to userspace in the ID_AA64ISAR2_EL1 feature register.
+
+Signed-off-by: Joey Gouly <joey.gouly@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20211210165432.8106-4-joey.gouly@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/cpu-feature-registers.rst | 8 ++++++++
+ Documentation/arm64/elf_hwcaps.rst | 4 ++++
+ arch/arm64/include/asm/hwcap.h | 1 +
+ arch/arm64/include/uapi/asm/hwcap.h | 1 +
+ arch/arm64/kernel/cpufeature.c | 2 ++
+ arch/arm64/kernel/cpuinfo.c | 1 +
+ 6 files changed, 17 insertions(+)
+
+--- a/Documentation/arm64/cpu-feature-registers.rst
++++ b/Documentation/arm64/cpu-feature-registers.rst
+@@ -283,6 +283,14 @@ infrastructure:
+ | AFP | [47-44] | y |
+ +------------------------------+---------+---------+
+
++ 9) ID_AA64ISAR2_EL1 - Instruction set attribute register 2
++
++ +------------------------------+---------+---------+
++ | Name | bits | visible |
++ +------------------------------+---------+---------+
++ | RPRES | [7-4] | y |
++ +------------------------------+---------+---------+
++
+
+ Appendix I: Example
+ -------------------
+--- a/Documentation/arm64/elf_hwcaps.rst
++++ b/Documentation/arm64/elf_hwcaps.rst
+@@ -255,6 +255,10 @@ HWCAP2_AFP
+
+ Functionality implied by ID_AA64MMFR1_EL1.AFP == 0b0001.
+
++HWCAP2_RPRES
++
++ Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001.
++
+ 4. Unused AT_HWCAP bits
+ -----------------------
+
+--- a/arch/arm64/include/asm/hwcap.h
++++ b/arch/arm64/include/asm/hwcap.h
+@@ -107,6 +107,7 @@
+ #define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)
+ #define KERNEL_HWCAP_ECV __khwcap2_feature(ECV)
+ #define KERNEL_HWCAP_AFP __khwcap2_feature(AFP)
++#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES)
+
+ /*
+ * This yields a mask that user programs can use to figure out what
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -77,5 +77,6 @@
+ #define HWCAP2_MTE (1 << 18)
+ #define HWCAP2_ECV (1 << 19)
+ #define HWCAP2_AFP (1 << 20)
++#define HWCAP2_RPRES (1 << 21)
+
+ #endif /* _UAPI__ASM_HWCAP_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -226,6 +226,7 @@ static const struct arm64_ftr_bits ftr_i
+ };
+
+ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
+ ARM64_FTR_END,
+ };
+
+@@ -2467,6 +2468,7 @@ static const struct arm64_cpu_capabiliti
+ #endif /* CONFIG_ARM64_MTE */
+ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
+ HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
++ HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
+ {},
+ };
+
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -96,6 +96,7 @@ static const char *const hwcap_str[] = {
+ [KERNEL_HWCAP_MTE] = "mte",
+ [KERNEL_HWCAP_ECV] = "ecv",
+ [KERNEL_HWCAP_AFP] = "afp",
++ [KERNEL_HWCAP_RPRES] = "rpres",
+ };
+
+ #ifdef CONFIG_COMPAT
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Thu, 25 Nov 2021 14:25:34 +0000
+Subject: arm64: entry: Add macro for reading symbol addresses from the trampoline
+
+From: James Morse <james.morse@arm.com>
+
+commit b28a8eebe81c186fdb1a0078263b30576c8e1f42 upstream.
+
+The trampoline code needs to use the address of symbols in the wider
+kernel, e.g. vectors. PC-relative addressing wouldn't work as the
+trampoline code doesn't run at the address the linker expected.
+
+tramp_ventry uses a literal pool, unless CONFIG_RANDOMIZE_BASE is
+set, in which case it uses the data page as a literal pool because
+the data page can be unmapped when running in user-space, which is
+required for CPUs vulnerable to meltdown.
+
+Pull this logic out as a macro, instead of adding a third copy
+of it.
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 37 ++++++++++++++++---------------------
+ 1 file changed, 16 insertions(+), 21 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -646,6 +646,15 @@ alternative_else_nop_endif
+ sub \dst, \dst, PAGE_SIZE
+ .endm
+
++ .macro tramp_data_read_var dst, var
++#ifdef CONFIG_RANDOMIZE_BASE
++ tramp_data_page \dst
++ add \dst, \dst, #:lo12:__entry_tramp_data_\var
++ ldr \dst, [\dst]
++#else
++ ldr \dst, =\var
++#endif
++ .endm
+
+ #define BHB_MITIGATION_NONE 0
+ #define BHB_MITIGATION_LOOP 1
+@@ -676,13 +685,8 @@ alternative_else_nop_endif
+ b .
+ 2:
+ tramp_map_kernel x30
+-#ifdef CONFIG_RANDOMIZE_BASE
+- tramp_data_page x30
+ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+- ldr x30, [x30]
+-#else
+- ldr x30, =vectors
+-#endif
++ tramp_data_read_var x30, vectors
+ alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
+ prfm plil1strm, [x30, #(1b - \vector_start)]
+ alternative_else_nop_endif
+@@ -765,7 +769,12 @@ SYM_CODE_END(tramp_exit_compat)
+ .pushsection ".rodata", "a"
+ .align PAGE_SHIFT
+ SYM_DATA_START(__entry_tramp_data_start)
++__entry_tramp_data_vectors:
+ .quad vectors
++#ifdef CONFIG_ARM_SDE_INTERFACE
++__entry_tramp_data___sdei_asm_handler:
++ .quad __sdei_asm_handler
++#endif /* CONFIG_ARM_SDE_INTERFACE */
+ SYM_DATA_END(__entry_tramp_data_start)
+ .popsection // .rodata
+ #endif /* CONFIG_RANDOMIZE_BASE */
+@@ -932,14 +941,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoli
+ * Remember whether to unmap the kernel on exit.
+ */
+ 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
+-
+-#ifdef CONFIG_RANDOMIZE_BASE
+- tramp_data_page x4
+- add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
+- ldr x4, [x4]
+-#else
+- ldr x4, =__sdei_asm_handler
+-#endif
++ tramp_data_read_var x4, __sdei_asm_handler
+ br x4
+ SYM_CODE_END(__sdei_asm_entry_trampoline)
+ NOKPROBE(__sdei_asm_entry_trampoline)
+@@ -962,13 +964,6 @@ SYM_CODE_END(__sdei_asm_exit_trampoline)
+ NOKPROBE(__sdei_asm_exit_trampoline)
+ .ltorg
+ .popsection // .entry.tramp.text
+-#ifdef CONFIG_RANDOMIZE_BASE
+-.pushsection ".rodata", "a"
+-SYM_DATA_START(__sdei_asm_trampoline_next_handler)
+- .quad __sdei_asm_handler
+-SYM_DATA_END(__sdei_asm_trampoline_next_handler)
+-.popsection // .rodata
+-#endif /* CONFIG_RANDOMIZE_BASE */
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+ /*
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Wed, 24 Nov 2021 15:03:15 +0000
+Subject: arm64: entry: Add non-kpti __bp_harden_el1_vectors for mitigations
+
+From: James Morse <james.morse@arm.com>
+
+commit aff65393fa1401e034656e349abd655cfe272de0 upstream.
+
+kpti is an optional feature, for systems not using kpti a set of
+vectors for the spectre-bhb mitigations is needed.
+
+Add another set of vectors, __bp_harden_el1_vectors, that will be
+used if a mitigation is needed and kpti is not in use.
+
+The EL1 ventries are repeated verbatim as there is no additional
+work needed for entry from EL1.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 35 ++++++++++++++++++++++++++++++++++-
+ 1 file changed, 34 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -649,10 +649,11 @@ alternative_else_nop_endif
+ .macro tramp_ventry, vector_start, regsize, kpti
+ .align 7
+ 1:
+- .if \kpti == 1
+ .if \regsize == 64
+ msr tpidrro_el0, x30 // Restored in kernel_ventry
+ .endif
++
++ .if \kpti == 1
+ /*
+ * Defend against branch aliasing attacks by pushing a dummy
+ * entry onto the return stack and using a RET instruction to
+@@ -740,6 +741,38 @@ SYM_DATA_END(__entry_tramp_data_start)
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+ /*
++ * Exception vectors for spectre mitigations on entry from EL1 when
++ * kpti is not in use.
++ */
++ .macro generate_el1_vector
++.Lvector_start\@:
++ kernel_ventry 1, t, 64, sync // Synchronous EL1t
++ kernel_ventry 1, t, 64, irq // IRQ EL1t
++ kernel_ventry 1, t, 64, fiq // FIQ EL1h
++ kernel_ventry 1, t, 64, error // Error EL1t
++
++ kernel_ventry 1, h, 64, sync // Synchronous EL1h
++ kernel_ventry 1, h, 64, irq // IRQ EL1h
++ kernel_ventry 1, h, 64, fiq // FIQ EL1h
++ kernel_ventry 1, h, 64, error // Error EL1h
++
++ .rept 4
++ tramp_ventry .Lvector_start\@, 64, kpti=0
++ .endr
++ .rept 4
++ tramp_ventry .Lvector_start\@, 32, kpti=0
++ .endr
++ .endm
++
++ .pushsection ".entry.text", "ax"
++ .align 11
++SYM_CODE_START(__bp_harden_el1_vectors)
++ generate_el1_vector
++SYM_CODE_END(__bp_harden_el1_vectors)
++ .popsection
++
++
++/*
+ * Register switch for AArch64. The callee-saved registers need to be saved
+ * and restored. On entry:
+ * x0 = previous task_struct (must be preserved across the switch)
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Thu, 18 Nov 2021 13:59:46 +0000
+Subject: arm64: entry: Add vectors that have the bhb mitigation sequences
+
+From: James Morse <james.morse@arm.com>
+
+commit ba2689234be92024e5635d30fe744f4853ad97db upstream.
+
+Some CPUs affected by Spectre-BHB need a sequence of branches, or a
+firmware call to be run before any indirect branch. This needs to go
+in the vectors. No CPU needs both.
+
+While this can be patched in, it would run on all CPUs as there is a
+single set of vectors. If only one part of a big/little combination is
+affected, the unaffected CPUs have to run the mitigation too.
+
+Create extra vectors that include the sequence. Subsequent patches will
+allow affected CPUs to select this set of vectors. Later patches will
+modify the loop count to match what the CPU requires.
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/assembler.h | 24 ++++++++++++++++
+ arch/arm64/include/asm/vectors.h | 34 +++++++++++++++++++++++
+ arch/arm64/kernel/entry.S | 53 ++++++++++++++++++++++++++++++-------
+ arch/arm64/kernel/proton-pack.c | 16 +++++++++++
+ include/linux/arm-smccc.h | 5 +++
+ 5 files changed, 123 insertions(+), 9 deletions(-)
+ create mode 100644 arch/arm64/include/asm/vectors.h
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -830,4 +830,28 @@ alternative_endif
+
+ #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
+
++ .macro __mitigate_spectre_bhb_loop tmp
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++ mov \tmp, #32
++.Lspectre_bhb_loop\@:
++ b . + 4
++ subs \tmp, \tmp, #1
++ b.ne .Lspectre_bhb_loop\@
++ sb
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++ .endm
++
++ /* Save/restores x0-x3 to the stack */
++ .macro __mitigate_spectre_bhb_fw
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++ stp x0, x1, [sp, #-16]!
++ stp x2, x3, [sp, #-16]!
++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
++alternative_cb smccc_patch_fw_mitigation_conduit
++ nop // Patched to SMC/HVC #0
++alternative_cb_end
++ ldp x2, x3, [sp], #16
++ ldp x0, x1, [sp], #16
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++ .endm
+ #endif /* __ASM_ASSEMBLER_H */
+--- /dev/null
++++ b/arch/arm64/include/asm/vectors.h
+@@ -0,0 +1,34 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2022 ARM Ltd.
++ */
++#ifndef __ASM_VECTORS_H
++#define __ASM_VECTORS_H
++
++/*
++ * Note: the order of this enum corresponds to two arrays in entry.S:
++ * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
++ * 'full fat' vectors are used directly.
++ */
++enum arm64_bp_harden_el1_vectors {
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++ /*
++ * Perform the BHB loop mitigation, before branching to the canonical
++ * vectors.
++ */
++ EL1_VECTOR_BHB_LOOP,
++
++ /*
++ * Make the SMC call for firmware mitigation, before branching to the
++ * canonical vectors.
++ */
++ EL1_VECTOR_BHB_FW,
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++
++ /*
++ * Remap the kernel before branching to the canonical vectors.
++ */
++ EL1_VECTOR_KPTI,
++};
++
++#endif /* __ASM_VECTORS_H */
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -646,13 +646,26 @@ alternative_else_nop_endif
+ sub \dst, \dst, PAGE_SIZE
+ .endm
+
+- .macro tramp_ventry, vector_start, regsize, kpti
++
++#define BHB_MITIGATION_NONE 0
++#define BHB_MITIGATION_LOOP 1
++#define BHB_MITIGATION_FW 2
++
++ .macro tramp_ventry, vector_start, regsize, kpti, bhb
+ .align 7
+ 1:
+ .if \regsize == 64
+ msr tpidrro_el0, x30 // Restored in kernel_ventry
+ .endif
+
++ .if \bhb == BHB_MITIGATION_LOOP
++ /*
++ * This sequence must appear before the first indirect branch. i.e. the
++ * ret out of tramp_ventry. It appears here because x30 is free.
++ */
++ __mitigate_spectre_bhb_loop x30
++ .endif // \bhb == BHB_MITIGATION_LOOP
++
+ .if \kpti == 1
+ /*
+ * Defend against branch aliasing attacks by pushing a dummy
+@@ -680,6 +693,15 @@ alternative_else_nop_endif
+ ldr x30, =vectors
+ .endif // \kpti == 1
+
++ .if \bhb == BHB_MITIGATION_FW
++ /*
++ * The firmware sequence must appear before the first indirect branch.
++ * i.e. the ret out of tramp_ventry. But it also needs the stack to be
++ * mapped to save/restore the registers the SMC clobbers.
++ */
++ __mitigate_spectre_bhb_fw
++ .endif // \bhb == BHB_MITIGATION_FW
++
+ add x30, x30, #(1b - \vector_start + 4)
+ ret
+ .org 1b + 128 // Did we overflow the ventry slot?
+@@ -687,6 +709,9 @@ alternative_else_nop_endif
+
+ .macro tramp_exit, regsize = 64
+ adr x30, tramp_vectors
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++ add x30, x30, SZ_4K
++#endif
+ msr vbar_el1, x30
+ ldr lr, [sp, #S_LR]
+ tramp_unmap_kernel x29
+@@ -698,26 +723,32 @@ alternative_else_nop_endif
+ sb
+ .endm
+
+- .macro generate_tramp_vector, kpti
++ .macro generate_tramp_vector, kpti, bhb
+ .Lvector_start\@:
+ .space 0x400
+
+ .rept 4
+- tramp_ventry .Lvector_start\@, 64, \kpti
++ tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
+ .endr
+ .rept 4
+- tramp_ventry .Lvector_start\@, 32, \kpti
++ tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
+ .endr
+ .endm
+
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ /*
+ * Exception vectors trampoline.
++ * The order must match __bp_harden_el1_vectors and the
++ * arm64_bp_harden_el1_vectors enum.
+ */
+ .pushsection ".entry.tramp.text", "ax"
+ .align 11
+ SYM_CODE_START_NOALIGN(tramp_vectors)
+- generate_tramp_vector kpti=1
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
++ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
+ SYM_CODE_END(tramp_vectors)
+
+ SYM_CODE_START(tramp_exit_native)
+@@ -744,7 +775,7 @@ SYM_DATA_END(__entry_tramp_data_start)
+ * Exception vectors for spectre mitigations on entry from EL1 when
+ * kpti is not in use.
+ */
+- .macro generate_el1_vector
++ .macro generate_el1_vector, bhb
+ .Lvector_start\@:
+ kernel_ventry 1, t, 64, sync // Synchronous EL1t
+ kernel_ventry 1, t, 64, irq // IRQ EL1t
+@@ -757,17 +788,21 @@ SYM_DATA_END(__entry_tramp_data_start)
+ kernel_ventry 1, h, 64, error // Error EL1h
+
+ .rept 4
+- tramp_ventry .Lvector_start\@, 64, kpti=0
++ tramp_ventry .Lvector_start\@, 64, 0, \bhb
+ .endr
+ .rept 4
+- tramp_ventry .Lvector_start\@, 32, kpti=0
++ tramp_ventry .Lvector_start\@, 32, 0, \bhb
+ .endr
+ .endm
+
++/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
+ .pushsection ".entry.text", "ax"
+ .align 11
+ SYM_CODE_START(__bp_harden_el1_vectors)
+- generate_el1_vector
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++ generate_el1_vector bhb=BHB_MITIGATION_LOOP
++ generate_el1_vector bhb=BHB_MITIGATION_FW
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ SYM_CODE_END(__bp_harden_el1_vectors)
+ .popsection
+
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -770,3 +770,19 @@ int arch_prctl_spec_ctrl_get(struct task
+ return -ENODEV;
+ }
+ }
++
++/* Patched to NOP when enabled */
++void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
++ __le32 *origptr,
++ __le32 *updptr, int nr_inst)
++{
++ BUG_ON(nr_inst != 1);
++}
++
++/* Patched to NOP when enabled */
++void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
++ __le32 *origptr,
++ __le32 *updptr, int nr_inst)
++{
++ BUG_ON(nr_inst != 1);
++}
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -92,6 +92,11 @@
+ ARM_SMCCC_SMC_32, \
+ 0, 0x7fff)
+
++#define ARM_SMCCC_ARCH_WORKAROUND_3 \
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
++ ARM_SMCCC_SMC_32, \
++ 0, 0x3fff)
++
+ #define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Thu, 18 Nov 2021 15:04:32 +0000
+Subject: arm64: entry: Allow the trampoline text to occupy multiple pages
+
+From: James Morse <james.morse@arm.com>
+
+commit a9c406e6462ff14956d690de7bbe5131a5677dc9 upstream.
+
+Adding a second set of vectors to .entry.tramp.text will make it
+larger than a single 4K page.
+
+Allow the trampoline text to occupy up to three pages by adding two
+more fixmap slots. Previous changes to tramp_valias allowed it to reach
+beyond a single page.
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/fixmap.h | 6 ++++--
+ arch/arm64/include/asm/sections.h | 5 +++++
+ arch/arm64/kernel/entry.S | 2 +-
+ arch/arm64/kernel/vmlinux.lds.S | 2 +-
+ arch/arm64/mm/mmu.c | 12 +++++++++---
+ 5 files changed, 20 insertions(+), 7 deletions(-)
+
+--- a/arch/arm64/include/asm/fixmap.h
++++ b/arch/arm64/include/asm/fixmap.h
+@@ -62,9 +62,11 @@ enum fixed_addresses {
+ #endif /* CONFIG_ACPI_APEI_GHES */
+
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+- FIX_ENTRY_TRAMP_TEXT,
++ FIX_ENTRY_TRAMP_TEXT3,
++ FIX_ENTRY_TRAMP_TEXT2,
++ FIX_ENTRY_TRAMP_TEXT1,
+ FIX_ENTRY_TRAMP_DATA,
+-#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
++#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ __end_of_permanent_fixed_addresses,
+
+--- a/arch/arm64/include/asm/sections.h
++++ b/arch/arm64/include/asm/sections.h
+@@ -22,4 +22,9 @@ extern char __irqentry_text_start[], __i
+ extern char __mmuoff_data_start[], __mmuoff_data_end[];
+ extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+
++static inline size_t entry_tramp_text_size(void)
++{
++ return __entry_tramp_text_end - __entry_tramp_text_start;
++}
++
+ #endif /* __ASM_SECTIONS_H */
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -642,7 +642,7 @@ alternative_else_nop_endif
+ .endm
+
+ .macro tramp_data_page dst
+- adr \dst, .entry.tramp.text
++ adr_l \dst, .entry.tramp.text
+ sub \dst, \dst, PAGE_SIZE
+ .endm
+
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -330,7 +330,7 @@ ASSERT(__hibernate_exit_text_end - (__hi
+ <= SZ_4K, "Hibernate exit text too big or misaligned")
+ #endif
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
++ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
+ "Entry trampoline text too big")
+ #endif
+ #ifdef CONFIG_KVM
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -616,6 +616,8 @@ early_param("rodata", parse_rodata);
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ static int __init map_entry_trampoline(void)
+ {
++ int i;
++
+ pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+ phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+
+@@ -624,11 +626,15 @@ static int __init map_entry_trampoline(v
+
+ /* Map only the text into the trampoline page table */
+ memset(tramp_pg_dir, 0, PGD_SIZE);
+- __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+- prot, __pgd_pgtable_alloc, 0);
++ __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
++ entry_tramp_text_size(), prot,
++ __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
+
+ /* Map both the text and data into the kernel page table */
+- __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
++ for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
++ __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
++ pa_start + i * PAGE_SIZE, prot);
++
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ extern char __entry_tramp_data_start[];
+
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Wed, 24 Nov 2021 11:40:18 +0000
+Subject: arm64: entry: Allow tramp_alias to access symbols after the 4K boundary
+
+From: James Morse <james.morse@arm.com>
+
+commit 6c5bf79b69f911560fbf82214c0971af6e58e682 upstream.
+
+Systems using kpti enter and exit the kernel through a trampoline mapping
+that is always mapped, even when the kernel is not. tramp_alias is a macro
+to find the address of a symbol in the trampoline mapping.
+
+Adding extra sets of vectors will expand the size of the entry.tramp.text
+section to beyond 4K. tramp_alias will be unable to generate addresses
+for symbols beyond 4K as it uses the 12-bit immediate of the add
+instruction.
+
+As there are now two registers available when tramp_alias is called,
+use the extra register to avoid the 4K limit of the 12-bit immediate.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -103,9 +103,12 @@
+ .org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
+ .endm
+
+- .macro tramp_alias, dst, sym
++ .macro tramp_alias, dst, sym, tmp
+ mov_q \dst, TRAMP_VALIAS
+- add \dst, \dst, #(\sym - .entry.tramp.text)
++ adr_l \tmp, \sym
++ add \dst, \dst, \tmp
++ adr_l \tmp, .entry.tramp.text
++ sub \dst, \dst, \tmp
+ .endm
+
+ /*
+@@ -429,10 +432,10 @@ alternative_else_nop_endif
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ bne 4f
+ msr far_el1, x29
+- tramp_alias x30, tramp_exit_native
++ tramp_alias x30, tramp_exit_native, x29
+ br x30
+ 4:
+- tramp_alias x30, tramp_exit_compat
++ tramp_alias x30, tramp_exit_compat, x29
+ br x30
+ #endif
+ .else
+@@ -998,7 +1001,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT
+ alternative_else_nop_endif
+
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+- tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
++ tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
+ br x5
+ #endif
+ SYM_CODE_END(__sdei_asm_handler)
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Wed, 24 Nov 2021 13:40:09 +0000
+Subject: arm64: entry: Don't assume tramp_vectors is the start of the vectors
+
+From: James Morse <james.morse@arm.com>
+
+commit ed50da7764535f1e24432ded289974f2bf2b0c5a upstream.
+
+The tramp_ventry macro uses tramp_vectors as the address of the vectors
+when calculating which ventry in the 'full fat' vectors to branch to.
+
+While there is only one set of tramp_vectors, this will be true.
+Adding multiple sets of vectors will break this assumption.
+
+Move the generation of the vectors to a macro, and pass the start
+of the vectors as an argument to tramp_ventry.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 30 ++++++++++++++++--------------
+ 1 file changed, 16 insertions(+), 14 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -652,7 +652,7 @@ alternative_else_nop_endif
+ sub \dst, \dst, PAGE_SIZE
+ .endm
+
+- .macro tramp_ventry, regsize = 64
++ .macro tramp_ventry, vector_start, regsize
+ .align 7
+ 1:
+ .if \regsize == 64
+@@ -675,10 +675,10 @@ alternative_insn isb, nop, ARM64_WORKARO
+ ldr x30, =vectors
+ #endif
+ alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
+- prfm plil1strm, [x30, #(1b - tramp_vectors)]
++ prfm plil1strm, [x30, #(1b - \vector_start)]
+ alternative_else_nop_endif
+ msr vbar_el1, x30
+- add x30, x30, #(1b - tramp_vectors + 4)
++ add x30, x30, #(1b - \vector_start + 4)
+ isb
+ ret
+ .org 1b + 128 // Did we overflow the ventry slot?
+@@ -697,19 +697,21 @@ alternative_else_nop_endif
+ sb
+ .endm
+
+- .align 11
+-SYM_CODE_START_NOALIGN(tramp_vectors)
++ .macro generate_tramp_vector
++.Lvector_start\@:
+ .space 0x400
+
+- tramp_ventry
+- tramp_ventry
+- tramp_ventry
+- tramp_ventry
+-
+- tramp_ventry 32
+- tramp_ventry 32
+- tramp_ventry 32
+- tramp_ventry 32
++ .rept 4
++ tramp_ventry .Lvector_start\@, 64
++ .endr
++ .rept 4
++ tramp_ventry .Lvector_start\@, 32
++ .endr
++ .endm
++
++ .align 11
++SYM_CODE_START_NOALIGN(tramp_vectors)
++ generate_tramp_vector
+ SYM_CODE_END(tramp_vectors)
+
+ SYM_CODE_START(tramp_exit_native)
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Tue, 23 Nov 2021 18:41:43 +0000
+Subject: arm64: entry: Free up another register on kpti's tramp_exit path
+
+From: James Morse <james.morse@arm.com>
+
+commit 03aff3a77a58b5b52a77e00537a42090ad57b80b upstream.
+
+Kpti stashes x30 in far_el1 while it uses x30 for all its work.
+
+Making the vectors a per-cpu data structure will require a second
+register.
+
+Allow tramp_exit two registers before it unmaps the kernel, by
+leaving x30 on the stack and stashing x29 in far_el1.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -419,14 +419,16 @@ alternative_else_nop_endif
+ ldp x24, x25, [sp, #16 * 12]
+ ldp x26, x27, [sp, #16 * 13]
+ ldp x28, x29, [sp, #16 * 14]
+- ldr lr, [sp, #S_LR]
+- add sp, sp, #PT_REGS_SIZE // restore sp
+
+ .if \el == 0
+-alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
++alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
++ ldr lr, [sp, #S_LR]
++ add sp, sp, #PT_REGS_SIZE // restore sp
++ eret
++alternative_else_nop_endif
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ bne 4f
+- msr far_el1, x30
++ msr far_el1, x29
+ tramp_alias x30, tramp_exit_native
+ br x30
+ 4:
+@@ -434,6 +436,9 @@ alternative_insn eret, nop, ARM64_UNMAP_
+ br x30
+ #endif
+ .else
++ ldr lr, [sp, #S_LR]
++ add sp, sp, #PT_REGS_SIZE // restore sp
++
+ /* Ensure any device/NC reads complete */
+ alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+
+@@ -674,10 +679,12 @@ alternative_else_nop_endif
+ .macro tramp_exit, regsize = 64
+ adr x30, tramp_vectors
+ msr vbar_el1, x30
+- tramp_unmap_kernel x30
++ ldr lr, [sp, #S_LR]
++ tramp_unmap_kernel x29
+ .if \regsize == 64
+- mrs x30, far_el1
++ mrs x29, far_el1
+ .endif
++ add sp, sp, #PT_REGS_SIZE // restore sp
+ eret
+ sb
+ .endm
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Thu, 18 Nov 2021 13:16:23 +0000
+Subject: arm64: entry: Make the kpti trampoline's kpti sequence optional
+
+From: James Morse <james.morse@arm.com>
+
+commit c47e4d04ba0f1ea17353d85d45f611277507e07a upstream.
+
+Spectre-BHB needs to add sequences to the vectors. Having one global
+set of vectors is a problem for big/little systems where the sequence
+is costly on cpus that are not vulnerable.
+
+Making the vectors per-cpu in the style of KVM's bh_harden_hyp_vecs
+requires the vectors to be generated by macros.
+
+Make the kpti re-mapping of the kernel optional, so the macros can be
+used without kpti.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -646,9 +646,10 @@ alternative_else_nop_endif
+ sub \dst, \dst, PAGE_SIZE
+ .endm
+
+- .macro tramp_ventry, vector_start, regsize
++ .macro tramp_ventry, vector_start, regsize, kpti
+ .align 7
+ 1:
++ .if \kpti == 1
+ .if \regsize == 64
+ msr tpidrro_el0, x30 // Restored in kernel_ventry
+ .endif
+@@ -671,9 +672,14 @@ alternative_insn isb, nop, ARM64_WORKARO
+ alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
+ prfm plil1strm, [x30, #(1b - \vector_start)]
+ alternative_else_nop_endif
++
+ msr vbar_el1, x30
+- add x30, x30, #(1b - \vector_start + 4)
+ isb
++ .else
++ ldr x30, =vectors
++ .endif // \kpti == 1
++
++ add x30, x30, #(1b - \vector_start + 4)
+ ret
+ .org 1b + 128 // Did we overflow the ventry slot?
+ .endm
+@@ -691,15 +697,15 @@ alternative_else_nop_endif
+ sb
+ .endm
+
+- .macro generate_tramp_vector
++ .macro generate_tramp_vector, kpti
+ .Lvector_start\@:
+ .space 0x400
+
+ .rept 4
+- tramp_ventry .Lvector_start\@, 64
++ tramp_ventry .Lvector_start\@, 64, \kpti
+ .endr
+ .rept 4
+- tramp_ventry .Lvector_start\@, 32
++ tramp_ventry .Lvector_start\@, 32, \kpti
+ .endr
+ .endm
+
+@@ -710,7 +716,7 @@ alternative_else_nop_endif
+ .pushsection ".entry.tramp.text", "ax"
+ .align 11
+ SYM_CODE_START_NOALIGN(tramp_vectors)
+- generate_tramp_vector
++ generate_tramp_vector kpti=1
+ SYM_CODE_END(tramp_vectors)
+
+ SYM_CODE_START(tramp_exit_native)
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Wed, 24 Nov 2021 15:36:12 +0000
+Subject: arm64: entry: Make the trampoline cleanup optional
+
+From: James Morse <james.morse@arm.com>
+
+commit d739da1694a0eaef0358a42b76904b611539b77b upstream.
+
+Subsequent patches will add additional sets of vectors that use
+the same tricks as the kpti vectors to reach the full-fat vectors.
+The full-fat vectors contain some cleanup for kpti that is patched
+in by alternatives when kpti is in use. Once there are additional
+vectors, the cleanup will be needed in more cases.
+
+But on big/little systems, the cleanup would be harmful if no
+trampoline vector were in use. Instead of forcing CPUs that don't
+need a trampoline vector to use one, make the trampoline cleanup
+optional.
+
+Entry at the top of the vectors will skip the cleanup. The trampoline
+vectors can then skip the first instruction, triggering the cleanup
+to run.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -40,14 +40,18 @@
+ .Lventry_start\@:
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ .if \el == 0
+-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
++ /*
++ * This must be the first instruction of the EL0 vector entries. It is
++ * skipped by the trampoline vectors, to trigger the cleanup.
++ */
++ b .Lskip_tramp_vectors_cleanup\@
+ .if \regsize == 64
+ mrs x30, tpidrro_el0
+ msr tpidrro_el0, xzr
+ .else
+ mov x30, xzr
+ .endif
+-alternative_else_nop_endif
++.Lskip_tramp_vectors_cleanup\@:
+ .endif
+ #endif
+
+@@ -661,7 +665,7 @@ alternative_if_not ARM64_WORKAROUND_CAVI
+ prfm plil1strm, [x30, #(1b - tramp_vectors)]
+ alternative_else_nop_endif
+ msr vbar_el1, x30
+- add x30, x30, #(1b - tramp_vectors)
++ add x30, x30, #(1b - tramp_vectors + 4)
+ isb
+ ret
+ .org 1b + 128 // Did we overflow the ventry slot?
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Tue, 23 Nov 2021 15:43:31 +0000
+Subject: arm64: entry: Move the trampoline data page before the text page
+
+From: James Morse <james.morse@arm.com>
+
+commit c091fb6ae059cda563b2a4d93fdbc548ef34e1d6 upstream.
+
+The trampoline code has a data page that holds the address of the vectors,
+which is unmapped when running in user-space. This ensures that with
+CONFIG_RANDOMIZE_BASE, the randomised address of the kernel can't be
+discovered until after the kernel has been mapped.
+
+If the trampoline text page is extended to include multiple sets of
+vectors, it will be larger than a single page, making it tricky to
+find the data page without knowing the size of the trampoline text
+pages, which will vary with PAGE_SIZE.
+
+Move the data page to appear before the text page. This allows the
+data page to be found without knowing the size of the trampoline text
+pages. 'tramp_vectors' is used to refer to the beginning of the
+.entry.tramp.text section; do that explicitly.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/fixmap.h | 2 +-
+ arch/arm64/kernel/entry.S | 9 +++++++--
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/fixmap.h
++++ b/arch/arm64/include/asm/fixmap.h
+@@ -62,8 +62,8 @@ enum fixed_addresses {
+ #endif /* CONFIG_ACPI_APEI_GHES */
+
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+- FIX_ENTRY_TRAMP_DATA,
+ FIX_ENTRY_TRAMP_TEXT,
++ FIX_ENTRY_TRAMP_DATA,
+ #define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ __end_of_permanent_fixed_addresses,
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -644,6 +644,11 @@ alternative_else_nop_endif
+ */
+ .endm
+
++ .macro tramp_data_page dst
++ adr \dst, .entry.tramp.text
++ sub \dst, \dst, PAGE_SIZE
++ .endm
++
+ .macro tramp_ventry, regsize = 64
+ .align 7
+ 1:
+@@ -660,7 +665,7 @@ alternative_else_nop_endif
+ 2:
+ tramp_map_kernel x30
+ #ifdef CONFIG_RANDOMIZE_BASE
+- adr x30, tramp_vectors + PAGE_SIZE
++ tramp_data_page x30
+ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ ldr x30, [x30]
+ #else
+@@ -851,7 +856,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoli
+ 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
+
+ #ifdef CONFIG_RANDOMIZE_BASE
+- adr x4, tramp_vectors + PAGE_SIZE
++ tramp_data_page x4
+ add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
+ ldr x4, [x4]
+ #else
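
With the fixmap entries swapped as above, the trampoline data page always sits
exactly one page below the start of the trampoline text, however many text
pages the trampoline later grows to. A minimal C sketch of that address
relationship (the numeric values below are invented for the illustration; only
the "one page below" relation comes from the patch):

/*
 * Illustration only: models the fixmap layout after this patch, where
 * FIX_ENTRY_TRAMP_DATA follows FIX_ENTRY_TRAMP_TEXT and fixmap virtual
 * addresses descend as the index grows.
 */
#include <assert.h>

#define PAGE_SIZE	4096UL
#define FIXADDR_TOP	0xfffffffffffe0000UL	/* made-up value for the sketch */

enum { FIX_ENTRY_TRAMP_TEXT = 10, FIX_ENTRY_TRAMP_DATA = 11 };

#define __fix_to_virt(x)	(FIXADDR_TOP - ((unsigned long)(x) << 12))
#define TRAMP_VALIAS		__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)

int main(void)
{
	/* The data page can now be found from the text page alone. */
	assert(__fix_to_virt(FIX_ENTRY_TRAMP_DATA) == TRAMP_VALIAS - PAGE_SIZE);
	return 0;
}
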
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Thu, 18 Nov 2021 14:02:30 +0000
+Subject: arm64: entry: Move trampoline macros out of ifdef'd section
+
+From: James Morse <james.morse@arm.com>
+
+commit 13d7a08352a83ef2252aeb464a5e08dfc06b5dfd upstream.
+
+The macros for building the kpti trampoline are all behind
+CONFIG_UNMAP_KERNEL_AT_EL0, and in a region that outputs to the
+.entry.tramp.text section.
+
+Move the macros out so they can be used to generate other kinds of
+trampoline. Only the symbols need to be guarded by
+CONFIG_UNMAP_KERNEL_AT_EL0 and appear in the .entry.tramp.text section.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -608,12 +608,6 @@ SYM_CODE_END(ret_to_user)
+
+ .popsection // .entry.text
+
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-/*
+- * Exception vectors trampoline.
+- */
+- .pushsection ".entry.tramp.text", "ax"
+-
+ // Move from tramp_pg_dir to swapper_pg_dir
+ .macro tramp_map_kernel, tmp
+ mrs \tmp, ttbr1_el1
+@@ -709,6 +703,11 @@ alternative_else_nop_endif
+ .endr
+ .endm
+
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++/*
++ * Exception vectors trampoline.
++ */
++ .pushsection ".entry.tramp.text", "ax"
+ .align 11
+ SYM_CODE_START_NOALIGN(tramp_vectors)
+ generate_tramp_vector
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Wed, 17 Nov 2021 15:15:26 +0000
+Subject: arm64: entry.S: Add ventry overflow sanity checks
+
+From: James Morse <james.morse@arm.com>
+
+commit 4330e2c5c04c27bebf89d34e0bc14e6943413067 upstream.
+
+Subsequent patches add even more code to the ventry slots.
+Ensure kernels that overflow a ventry slot don't get built.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -37,6 +37,7 @@
+
+ .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
+ .align 7
++.Lventry_start\@:
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ .if \el == 0
+ alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+@@ -95,6 +96,7 @@ alternative_else_nop_endif
+ mrs x0, tpidrro_el0
+ #endif
+ b el\el\ht\()_\regsize\()_\label
++.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
+ .endm
+
+ .macro tramp_alias, dst, sym
+@@ -662,6 +664,7 @@ alternative_else_nop_endif
+ add x30, x30, #(1b - tramp_vectors)
+ isb
+ ret
++.org 1b + 128 // Did we overflow the ventry slot?
+ .endm
+
+ .macro tramp_exit, regsize = 64
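
Each vector entry is 128-byte aligned (.align 7), so a slot can hold at most
32 four-byte A64 instructions; the ".org 1b + 128" directives added here make
the assembler reject any build where a slot is filled past that point. A rough
C analogue of the same build-time check, using a hypothetical structure that
stands in for one slot:

/* Sketch only: 'struct ventry_slot' is invented here to mirror the 128-byte
 * budget that ".org 1b + 128" enforces at assembly time. */
struct ventry_slot {
	unsigned int insns[32];		/* 32 x 4-byte A64 instructions */
};

_Static_assert(sizeof(struct ventry_slot) <= 128,
	       "vector entry overflows its 128-byte slot");
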
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Wed, 10 Nov 2021 14:48:00 +0000
+Subject: arm64: Mitigate spectre style branch history side channels
+
+From: James Morse <james.morse@arm.com>
+
+commit 558c303c9734af5a813739cd284879227f7297d2 upstream.
+
+Speculation attacks against some high-performance processors can
+make use of branch history to influence future speculation.
+When taking an exception from user-space, a sequence of branches
+or a firmware call overwrites or invalidates the branch history.
+
+The sequence of branches is added to the vectors, and should appear
+before the first indirect branch. For systems using KPTI the sequence
+is added to the kpti trampoline where it has a free register as the exit
+from the trampoline is via a 'ret'. For systems not using KPTI, the same
+register tricks are used to free up a register in the vectors.
+
+For the firmware call, arch-workaround-3 clobbers 4 registers, so
+there is no choice but to save them to the EL1 stack. This only happens
+for entry from EL0, so if we take an exception due to the stack access,
+it will not become re-entrant.
+
+For KVM, the existing branch-predictor-hardening vectors are used.
+When a spectre version of these vectors is in use, the firmware call
+is sufficient to mitigate against Spectre-BHB. For the non-spectre
+versions, the sequence of branches is added to the indirect vector.
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig | 9 +
+ arch/arm64/include/asm/assembler.h | 14 +
+ arch/arm64/include/asm/cpufeature.h | 16 ++
+ arch/arm64/include/asm/cputype.h | 8 +
+ arch/arm64/include/asm/spectre.h | 4
+ arch/arm64/include/asm/sysreg.h | 1
+ arch/arm64/include/asm/vectors.h | 5
+ arch/arm64/kernel/cpu_errata.c | 7
+ arch/arm64/kernel/image-vars.h | 3
+ arch/arm64/kernel/proton-pack.c | 278 ++++++++++++++++++++++++++++++++++++
+ arch/arm64/kvm/hyp/hyp-entry.S | 8 +
+ arch/arm64/tools/cpucaps | 1
+ 12 files changed, 352 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1184,6 +1184,15 @@ config UNMAP_KERNEL_AT_EL0
+
+ If unsure, say Y.
+
++config MITIGATE_SPECTRE_BRANCH_HISTORY
++ bool "Mitigate Spectre style attacks against branch history" if EXPERT
++ default y
++ help
++ Speculation attacks against some high-performance processors can
++ make use of branch history to influence future speculation.
++ When taking an exception from user-space, a sequence of branches
++ or a firmware call overwrites the branch history.
++
+ config RODATA_FULL_DEFAULT_ENABLED
+ bool "Apply r/o permissions of VM areas also to their linear aliases"
+ default y
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -832,7 +832,9 @@ alternative_endif
+
+ .macro __mitigate_spectre_bhb_loop tmp
+ #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+- mov \tmp, #32
++alternative_cb spectre_bhb_patch_loop_iter
++ mov \tmp, #32 // Patched to correct the immediate
++alternative_cb_end
+ .Lspectre_bhb_loop\@:
+ b . + 4
+ subs \tmp, \tmp, #1
+@@ -841,6 +843,16 @@ alternative_endif
+ #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ .endm
+
++ .macro mitigate_spectre_bhb_loop tmp
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++alternative_cb spectre_bhb_patch_loop_mitigation_enable
++ b .L_spectre_bhb_loop_done\@ // Patched to NOP
++alternative_cb_end
++ __mitigate_spectre_bhb_loop \tmp
++.L_spectre_bhb_loop_done\@:
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++ .endm
++
+ /* Save/restores x0-x3 to the stack */
+ .macro __mitigate_spectre_bhb_fw
+ #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -637,6 +637,22 @@ static inline bool cpu_supports_mixed_en
+ return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+ }
+
++
++static inline bool supports_csv2p3(int scope)
++{
++ u64 pfr0;
++ u8 csv2_val;
++
++ if (scope == SCOPE_LOCAL_CPU)
++ pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
++ else
++ pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++
++ csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
++ ID_AA64PFR0_CSV2_SHIFT);
++ return csv2_val == 3;
++}
++
+ const struct cpumask *system_32bit_el0_cpumask(void);
+ DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -73,10 +73,14 @@
+ #define ARM_CPU_PART_CORTEX_A76 0xD0B
+ #define ARM_CPU_PART_NEOVERSE_N1 0xD0C
+ #define ARM_CPU_PART_CORTEX_A77 0xD0D
++#define ARM_CPU_PART_NEOVERSE_V1 0xD40
++#define ARM_CPU_PART_CORTEX_A78 0xD41
++#define ARM_CPU_PART_CORTEX_X1 0xD44
+ #define ARM_CPU_PART_CORTEX_A510 0xD46
+ #define ARM_CPU_PART_CORTEX_A710 0xD47
+ #define ARM_CPU_PART_CORTEX_X2 0xD48
+ #define ARM_CPU_PART_NEOVERSE_N2 0xD49
++#define ARM_CPU_PART_CORTEX_A78C 0xD4B
+
+ #define APM_CPU_PART_POTENZA 0x000
+
+@@ -117,10 +121,14 @@
+ #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+ #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+ #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
++#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
++#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
++#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
+ #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+ #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+ #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+ #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
++#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+--- a/arch/arm64/include/asm/spectre.h
++++ b/arch/arm64/include/asm/spectre.h
+@@ -94,6 +94,8 @@ void spectre_v4_enable_task_mitigation(s
+ enum mitigation_state arm64_get_meltdown_state(void);
+
+ enum mitigation_state arm64_get_spectre_bhb_state(void);
+-
++bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
++u8 spectre_bhb_loop_affected(int scope);
++void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ASM_SPECTRE_H */
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -896,6 +896,7 @@
+ #endif
+
+ /* id_aa64mmfr1 */
++#define ID_AA64MMFR1_ECBHB_SHIFT 60
+ #define ID_AA64MMFR1_AFP_SHIFT 44
+ #define ID_AA64MMFR1_ETS_SHIFT 36
+ #define ID_AA64MMFR1_TWED_SHIFT 32
+--- a/arch/arm64/include/asm/vectors.h
++++ b/arch/arm64/include/asm/vectors.h
+@@ -40,6 +40,11 @@ enum arm64_bp_harden_el1_vectors {
+ EL1_VECTOR_KPTI,
+ };
+
++#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++#define EL1_VECTOR_BHB_LOOP -1
++#define EL1_VECTOR_BHB_FW -1
++#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++
+ /* The vectors to use on return from EL0. e.g. to remap the kernel */
+ DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -464,6 +464,13 @@ const struct arm64_cpu_capabilities arm6
+ .matches = has_spectre_v4,
+ .cpu_enable = spectre_v4_enable_mitigation,
+ },
++ {
++ .desc = "Spectre-BHB",
++ .capability = ARM64_SPECTRE_BHB,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .matches = is_spectre_bhb_affected,
++ .cpu_enable = spectre_bhb_enable_mitigation,
++ },
+ #ifdef CONFIG_ARM64_ERRATUM_1418040
+ {
+ .desc = "ARM erratum 1418040",
+--- a/arch/arm64/kernel/image-vars.h
++++ b/arch/arm64/kernel/image-vars.h
+@@ -66,6 +66,9 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch);
+ KVM_NVHE_ALIAS(kvm_update_va_mask);
+ KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
+ KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
++KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
++KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
++KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
+
+ /* Global kernel state accessed by nVHE hyp code. */
+ KVM_NVHE_ALIAS(kvm_vgic_global_state);
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -24,9 +24,11 @@
+ #include <linux/prctl.h>
+ #include <linux/sched/task_stack.h>
+
++#include <asm/debug-monitors.h>
+ #include <asm/insn.h>
+ #include <asm/spectre.h>
+ #include <asm/traps.h>
++#include <asm/vectors.h>
+ #include <asm/virt.h>
+
+ /*
+@@ -796,6 +798,17 @@ int arch_prctl_spec_ctrl_get(struct task
+ }
+ }
+
++/*
++ * Spectre BHB.
++ *
++ * A CPU is either:
++ * - Mitigated by a branchy loop a CPU specific number of times, and listed
++ * in our "loop mitigated list".
++ * - Mitigated in software by the firmware Spectre v2 call.
++ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
++ * software mitigation in the vectors is needed.
++ * - Has CSV2.3, so is unaffected.
++ */
+ static enum mitigation_state spectre_bhb_state;
+
+ enum mitigation_state arm64_get_spectre_bhb_state(void)
+@@ -803,12 +816,227 @@ enum mitigation_state arm64_get_spectre_
+ return spectre_bhb_state;
+ }
+
++enum bhb_mitigation_bits {
++ BHB_LOOP,
++ BHB_FW,
++ BHB_HW,
++};
++static unsigned long system_bhb_mitigations;
++
++/*
++ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
++ * SCOPE_SYSTEM call will give the right answer.
++ */
++u8 spectre_bhb_loop_affected(int scope)
++{
++ u8 k = 0;
++ static u8 max_bhb_k;
++
++ if (scope == SCOPE_LOCAL_CPU) {
++ static const struct midr_range spectre_bhb_k32_list[] = {
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
++ {},
++ };
++ static const struct midr_range spectre_bhb_k24_list[] = {
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
++ {},
++ };
++ static const struct midr_range spectre_bhb_k8_list[] = {
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++ {},
++ };
++
++ if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
++ k = 32;
++ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
++ k = 24;
++ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
++ k = 8;
++
++ max_bhb_k = max(max_bhb_k, k);
++ } else {
++ k = max_bhb_k;
++ }
++
++ return k;
++}
++
++static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
++{
++ int ret;
++ struct arm_smccc_res res;
++
++ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_3, &res);
++
++ ret = res.a0;
++ switch (ret) {
++ case SMCCC_RET_SUCCESS:
++ return SPECTRE_MITIGATED;
++ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
++ return SPECTRE_UNAFFECTED;
++ default:
++ fallthrough;
++ case SMCCC_RET_NOT_SUPPORTED:
++ return SPECTRE_VULNERABLE;
++ }
++}
++
++static bool is_spectre_bhb_fw_affected(int scope)
++{
++ static bool system_affected;
++ enum mitigation_state fw_state;
++ bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
++ static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
++ {},
++ };
++ bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
++ spectre_bhb_firmware_mitigated_list);
++
++ if (scope != SCOPE_LOCAL_CPU)
++ return system_affected;
++
++ fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
++ if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
++ system_affected = true;
++ return true;
++ }
++
++ return false;
++}
++
++static bool supports_ecbhb(int scope)
++{
++ u64 mmfr1;
++
++ if (scope == SCOPE_LOCAL_CPU)
++ mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
++ else
++ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
++
++ return cpuid_feature_extract_unsigned_field(mmfr1,
++ ID_AA64MMFR1_ECBHB_SHIFT);
++}
++
++bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++ if (supports_csv2p3(scope))
++ return false;
++
++ if (spectre_bhb_loop_affected(scope))
++ return true;
++
++ if (is_spectre_bhb_fw_affected(scope))
++ return true;
++
++ return false;
++}
++
++static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
++{
++ const char *v = arm64_get_bp_hardening_vector(slot);
++
++ if (slot < 0)
++ return;
++
++ __this_cpu_write(this_cpu_vector, v);
++
++ /*
++ * When KPTI is in use, the vectors are switched when exiting to
++ * user-space.
++ */
++ if (arm64_kernel_unmapped_at_el0())
++ return;
++
++ write_sysreg(v, vbar_el1);
++ isb();
++}
++
++void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
++{
++ bp_hardening_cb_t cpu_cb;
++ enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
++ struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
++
++ if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
++ return;
++
++ if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
++ /* No point mitigating Spectre-BHB alone. */
++ } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
++ pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
++ } else if (cpu_mitigations_off()) {
++ pr_info_once("spectre-bhb mitigation disabled by command line option\n");
++ } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
++ state = SPECTRE_MITIGATED;
++ set_bit(BHB_HW, &system_bhb_mitigations);
++ } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
++ /*
++ * Ensure KVM uses the indirect vector which will have the
++ * branchy-loop added. A57/A72-r0 will already have selected
++ * the spectre-indirect vector, which is sufficient for BHB
++ * too.
++ */
++ if (!data->slot)
++ data->slot = HYP_VECTOR_INDIRECT;
++
++ this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
++ state = SPECTRE_MITIGATED;
++ set_bit(BHB_LOOP, &system_bhb_mitigations);
++ } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
++ fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
++ if (fw_state == SPECTRE_MITIGATED) {
++ /*
++ * Ensure KVM uses one of the spectre bp_hardening
++ * vectors. The indirect vector doesn't include the EL3
++ * call, so needs upgrading to
++ * HYP_VECTOR_SPECTRE_INDIRECT.
++ */
++ if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
++ data->slot += 1;
++
++ this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
++
++ /*
++ * The WA3 call in the vectors supersedes the WA1 call
++ * made during context-switch. Uninstall any firmware
++ * bp_hardening callback.
++ */
++ cpu_cb = spectre_v2_get_sw_mitigation_cb();
++ if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
++ __this_cpu_write(bp_hardening_data.fn, NULL);
++
++ state = SPECTRE_MITIGATED;
++ set_bit(BHB_FW, &system_bhb_mitigations);
++ }
++ }
++
++ update_mitigation_state(&spectre_bhb_state, state);
++}
++
+ /* Patched to NOP when enabled */
+ void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
+ __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+ {
+ BUG_ON(nr_inst != 1);
++
++ if (test_bit(BHB_LOOP, &system_bhb_mitigations))
++ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+ }
+
+ /* Patched to NOP when enabled */
+@@ -817,4 +1045,54 @@ void noinstr spectre_bhb_patch_fw_mitiga
+ __le32 *updptr, int nr_inst)
+ {
+ BUG_ON(nr_inst != 1);
++
++ if (test_bit(BHB_FW, &system_bhb_mitigations))
++ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
++}
++
++/* Patched to correct the immediate */
++void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr, int nr_inst)
++{
++ u8 rd;
++ u32 insn;
++ u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
++
++ BUG_ON(nr_inst != 1); /* MOV -> MOV */
++
++ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
++ return;
++
++ insn = le32_to_cpu(*origptr);
++ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
++ insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
++ AARCH64_INSN_VARIANT_64BIT,
++ AARCH64_INSN_MOVEWIDE_ZERO);
++ *updptr++ = cpu_to_le32(insn);
++}
++
++/* Patched to mov WA3 when supported */
++void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr, int nr_inst)
++{
++ u8 rd;
++ u32 insn;
++
++ BUG_ON(nr_inst != 1); /* MOV -> MOV */
++
++ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
++ !test_bit(BHB_FW, &system_bhb_mitigations))
++ return;
++
++ insn = le32_to_cpu(*origptr);
++ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
++
++ insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
++ AARCH64_INSN_VARIANT_32BIT,
++ AARCH64_INSN_REG_ZR, rd,
++ ARM_SMCCC_ARCH_WORKAROUND_3);
++ if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
++ return;
++
++ *updptr++ = cpu_to_le32(insn);
+ }
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -62,6 +62,10 @@ el1_sync: // Guest trapped into EL2
+ /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
+ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
+ ARM_SMCCC_ARCH_WORKAROUND_2)
++ cbz w1, wa_epilogue
++
++ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
++ ARM_SMCCC_ARCH_WORKAROUND_3)
+ cbnz w1, el1_trap
+
+ wa_epilogue:
+@@ -192,7 +196,10 @@ SYM_CODE_END(__kvm_hyp_vector)
+ sub sp, sp, #(8 * 4)
+ stp x2, x3, [sp, #(8 * 0)]
+ stp x0, x1, [sp, #(8 * 2)]
++ alternative_cb spectre_bhb_patch_wa3
++ /* Patched to mov WA3 when supported */
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
++ alternative_cb_end
+ smc #0
+ ldp x2, x3, [sp, #(8 * 0)]
+ add sp, sp, #(8 * 2)
+@@ -205,6 +212,7 @@ SYM_CODE_END(__kvm_hyp_vector)
+ spectrev2_smccc_wa1_smc
+ .else
+ stp x0, x1, [sp, #-16]!
++ mitigate_spectre_bhb_loop x0
+ .endif
+ .if \indirect != 0
+ alternative_cb kvm_patch_vector_branch
+--- a/arch/arm64/tools/cpucaps
++++ b/arch/arm64/tools/cpucaps
+@@ -42,6 +42,7 @@ MTE
+ SPECTRE_V2
+ SPECTRE_V3A
+ SPECTRE_V4
++SPECTRE_BHB
+ SSBS
+ SVE
+ UNMAP_KERNEL_AT_EL0
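
The loop mitigation added above works by executing a CPU-specific number of
taken branches (k = 8, 24 or 32, chosen by spectre_bhb_loop_affected()) so
that any attacker-controlled branch history is overwritten before the first
indirect branch in the vector. A rough C model of what the patched
__mitigate_spectre_bhb_loop sequence does at run time (an aarch64-only
illustration, not the real mitigation, which must run inside the exception
vectors themselves):

/*
 * Illustration only: each iteration executes one unconditionally taken
 * branch, mirroring the "b . + 4" / "subs" loop added to assembler.h.
 */
static inline void bhb_overwrite_branch_history(unsigned int k)
{
	while (k--)
		asm volatile("b 1f\n1:" ::: "memory");
}
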
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Thu, 3 Mar 2022 16:53:56 +0000
+Subject: arm64: proton-pack: Include unprivileged eBPF status in Spectre v2 mitigation reporting
+
+From: James Morse <james.morse@arm.com>
+
+commit 58c9a5060cb7cd529d49c93954cdafe81c1d642a upstream.
+
+The mitigations for Spectre-BHB are only applied when an exception is
+taken from user-space. The mitigation status is reported via the spectre_v2
+sysfs vulnerabilities file.
+
+When unprivileged eBPF is enabled the mitigation in the exception vectors
+can be avoided by an eBPF program.
+
+When unprivileged eBPF is enabled, print a warning and report vulnerable
+via the sysfs vulnerabilities file.
+
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/proton-pack.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -18,6 +18,7 @@
+ */
+
+ #include <linux/arm-smccc.h>
++#include <linux/bpf.h>
+ #include <linux/cpu.h>
+ #include <linux/device.h>
+ #include <linux/nospec.h>
+@@ -111,6 +112,15 @@ static const char *get_bhb_affected_stri
+ }
+ }
+
++static bool _unprivileged_ebpf_enabled(void)
++{
++#ifdef CONFIG_BPF_SYSCALL
++ return !sysctl_unprivileged_bpf_disabled;
++#else
++ return false;
++#endif
++}
++
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+ {
+@@ -130,6 +140,9 @@ ssize_t cpu_show_spectre_v2(struct devic
+ v2_str = "CSV2";
+ fallthrough;
+ case SPECTRE_MITIGATED:
++ if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
++ return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
++
+ return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+@@ -1125,3 +1138,16 @@ void __init spectre_bhb_patch_clearbhb(s
+ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+ }
++
++#ifdef CONFIG_BPF_SYSCALL
++#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
++void unpriv_ebpf_notify(int new_state)
++{
++ if (spectre_v2_state == SPECTRE_VULNERABLE ||
++ spectre_bhb_state != SPECTRE_MITIGATED)
++ return;
++
++ if (!new_state)
++ pr_err("WARNING: %s", EBPF_WARN);
++}
++#endif
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Tue, 8 Feb 2022 16:08:13 +0000
+Subject: arm64: proton-pack: Report Spectre-BHB vulnerabilities as part of Spectre-v2
+
+From: James Morse <james.morse@arm.com>
+
+commit dee435be76f4117410bbd90573a881fd33488f37 upstream.
+
+Speculation attacks against some high-performance processors can
+make use of branch history to influence future speculation as part of
+a spectre-v2 attack. This is not mitigated by CSV2 alone, meaning CPUs that
+previously reported 'Not affected' are now only moderately mitigated by CSV2.
+
+Update the value in /sys/devices/system/cpu/vulnerabilities/spectre_v2
+to also show the state of the BHB mitigation.
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/spectre.h | 2 ++
+ arch/arm64/kernel/proton-pack.c | 36 ++++++++++++++++++++++++++++++++++--
+ 2 files changed, 36 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/spectre.h
++++ b/arch/arm64/include/asm/spectre.h
+@@ -93,5 +93,7 @@ void spectre_v4_enable_task_mitigation(s
+
+ enum mitigation_state arm64_get_meltdown_state(void);
+
++enum mitigation_state arm64_get_spectre_bhb_state(void);
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ASM_SPECTRE_H */
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -96,14 +96,39 @@ static bool spectre_v2_mitigations_off(v
+ return ret;
+ }
+
++static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
++{
++ switch (bhb_state) {
++ case SPECTRE_UNAFFECTED:
++ return "";
++ default:
++ case SPECTRE_VULNERABLE:
++ return ", but not BHB";
++ case SPECTRE_MITIGATED:
++ return ", BHB";
++ }
++}
++
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+ {
++ enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
++ const char *bhb_str = get_bhb_affected_string(bhb_state);
++ const char *v2_str = "Branch predictor hardening";
++
+ switch (spectre_v2_state) {
+ case SPECTRE_UNAFFECTED:
+- return sprintf(buf, "Not affected\n");
++ if (bhb_state == SPECTRE_UNAFFECTED)
++ return sprintf(buf, "Not affected\n");
++
++ /*
++ * Platforms affected by Spectre-BHB can't report
++ * "Not affected" for Spectre-v2.
++ */
++ v2_str = "CSV2";
++ fallthrough;
+ case SPECTRE_MITIGATED:
+- return sprintf(buf, "Mitigation: Branch predictor hardening\n");
++ return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
+ case SPECTRE_VULNERABLE:
+ fallthrough;
+ default:
+@@ -771,6 +796,13 @@ int arch_prctl_spec_ctrl_get(struct task
+ }
+ }
+
++static enum mitigation_state spectre_bhb_state;
++
++enum mitigation_state arm64_get_spectre_bhb_state(void)
++{
++ return spectre_bhb_state;
++}
++
+ /* Patched to NOP when enabled */
+ void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
+ __le32 *origptr,
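
Together with the unprivileged eBPF check in the previous patch, the combined
status is visible from userspace through the spectre_v2 vulnerabilities file
updated above. A small self-contained reader (the paths are the standard sysfs
and procfs locations; the program itself is only a sketch and not part of the
series):

/* Sketch: print the spectre_v2 status line and the unprivileged-eBPF sysctl
 * (a value of 0 means unprivileged eBPF is enabled). */
#include <stdio.h>

static void print_file(const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	print_file("/sys/devices/system/cpu/vulnerabilities/spectre_v2");
	print_file("/proc/sys/kernel/unprivileged_bpf_disabled");
	return 0;
}
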
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Tue, 16 Nov 2021 15:00:51 +0000
+Subject: arm64: spectre: Rename spectre_v4_patch_fw_mitigation_conduit
+
+From: James Morse <james.morse@arm.com>
+
+commit 1b33d4860deaecf1d8eec3061b7e7ed7ab0bae8d upstream.
+
+The spectre-v4 sequence includes an SMC from the assembly entry code.
+spectre_v4_patch_fw_mitigation_conduit is the patching callback that
+generates an HVC or SMC depending on the SMCCC conduit type.
+
+As this isn't specific to spectre-v4, rename it
+smccc_patch_fw_mitigation_conduit so it can be re-used.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/entry.S | 2 +-
+ arch/arm64/kernel/proton-pack.c | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -118,7 +118,7 @@ alternative_cb_end
+ tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+ mov w1, #\state
+-alternative_cb spectre_v4_patch_fw_mitigation_conduit
++alternative_cb smccc_patch_fw_mitigation_conduit
+ nop // Patched to SMC/HVC #0
+ alternative_cb_end
+ .L__asm_ssbd_skip\@:
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -554,9 +554,9 @@ void __init spectre_v4_patch_fw_mitigati
+ * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
+ * to call into firmware to adjust the mitigation state.
+ */
+-void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
+- __le32 *origptr,
+- __le32 *updptr, int nr_inst)
++void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
++ __le32 *origptr,
++ __le32 *updptr, int nr_inst)
+ {
+ u32 insn;
+
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Fri, 10 Dec 2021 14:32:56 +0000
+Subject: arm64: Use the clearbhb instruction in mitigations
+
+From: James Morse <james.morse@arm.com>
+
+commit 228a26b912287934789023b4132ba76065d9491c upstream.
+
+Future CPUs may implement a clearbhb instruction that is sufficient
+to mitigate SpectreBHB. CPUs that implement this instruction, but
+not CSV2.3, must be affected by Spectre-BHB.
+
+Add support to use this instruction as the BHB mitigation on CPUs
+that support it. The instruction is in the hint space, so it will
+be treated as a NOP by older CPUs.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/assembler.h | 17 +++++++++++++++++
+ arch/arm64/include/asm/cpufeature.h | 13 +++++++++++++
+ arch/arm64/include/asm/insn.h | 1 +
+ arch/arm64/include/asm/sysreg.h | 1 +
+ arch/arm64/include/asm/vectors.h | 7 +++++++
+ arch/arm64/kernel/cpufeature.c | 1 +
+ arch/arm64/kernel/entry.S | 8 ++++++++
+ arch/arm64/kernel/image-vars.h | 1 +
+ arch/arm64/kernel/proton-pack.c | 29 +++++++++++++++++++++++++++++
+ arch/arm64/kvm/hyp/hyp-entry.S | 1 +
+ 10 files changed, 79 insertions(+)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -108,6 +108,13 @@
+ .endm
+
+ /*
++ * Clear Branch History instruction
++ */
++ .macro clearbhb
++ hint #22
++ .endm
++
++/*
+ * Speculation barrier
+ */
+ .macro sb
+@@ -866,4 +873,14 @@ alternative_cb_end
+ ldp x0, x1, [sp], #16
+ #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ .endm
++
++ .macro mitigate_spectre_bhb_clear_insn
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++alternative_cb spectre_bhb_patch_clearbhb
++ /* Patched to NOP when not supported */
++ clearbhb
++ isb
++alternative_cb_end
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++ .endm
+ #endif /* __ASM_ASSEMBLER_H */
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -653,6 +653,19 @@ static inline bool supports_csv2p3(int s
+ return csv2_val == 3;
+ }
+
++static inline bool supports_clearbhb(int scope)
++{
++ u64 isar2;
++
++ if (scope == SCOPE_LOCAL_CPU)
++ isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
++ else
++ isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
++
++ return cpuid_feature_extract_unsigned_field(isar2,
++ ID_AA64ISAR2_CLEARBHB_SHIFT);
++}
++
+ const struct cpumask *system_32bit_el0_cpumask(void);
+ DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
+--- a/arch/arm64/include/asm/insn.h
++++ b/arch/arm64/include/asm/insn.h
+@@ -65,6 +65,7 @@ enum aarch64_insn_hint_cr_op {
+ AARCH64_INSN_HINT_PSB = 0x11 << 5,
+ AARCH64_INSN_HINT_TSB = 0x12 << 5,
+ AARCH64_INSN_HINT_CSDB = 0x14 << 5,
++ AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5,
+
+ AARCH64_INSN_HINT_BTI = 0x20 << 5,
+ AARCH64_INSN_HINT_BTIC = 0x22 << 5,
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -766,6 +766,7 @@
+ #define ID_AA64ISAR1_GPI_IMP_DEF 0x1
+
+ /* id_aa64isar2 */
++#define ID_AA64ISAR2_CLEARBHB_SHIFT 28
+ #define ID_AA64ISAR2_RPRES_SHIFT 4
+ #define ID_AA64ISAR2_WFXT_SHIFT 0
+
+--- a/arch/arm64/include/asm/vectors.h
++++ b/arch/arm64/include/asm/vectors.h
+@@ -32,6 +32,12 @@ enum arm64_bp_harden_el1_vectors {
+ * canonical vectors.
+ */
+ EL1_VECTOR_BHB_FW,
++
++ /*
++ * Use the ClearBHB instruction, before branching to the canonical
++ * vectors.
++ */
++ EL1_VECTOR_BHB_CLEAR_INSN,
+ #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+
+ /*
+@@ -43,6 +49,7 @@ enum arm64_bp_harden_el1_vectors {
+ #ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ #define EL1_VECTOR_BHB_LOOP -1
+ #define EL1_VECTOR_BHB_FW -1
++#define EL1_VECTOR_BHB_CLEAR_INSN -1
+ #endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+
+ /* The vectors to use on return from EL0. e.g. to remap the kernel */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -231,6 +231,7 @@ static const struct arm64_ftr_bits ftr_i
+ };
+
+ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
+ ARM64_FTR_END,
+ };
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -657,6 +657,7 @@ alternative_else_nop_endif
+ #define BHB_MITIGATION_NONE 0
+ #define BHB_MITIGATION_LOOP 1
+ #define BHB_MITIGATION_FW 2
++#define BHB_MITIGATION_INSN 3
+
+ .macro tramp_ventry, vector_start, regsize, kpti, bhb
+ .align 7
+@@ -673,6 +674,11 @@ alternative_else_nop_endif
+ __mitigate_spectre_bhb_loop x30
+ .endif // \bhb == BHB_MITIGATION_LOOP
+
++ .if \bhb == BHB_MITIGATION_INSN
++ clearbhb
++ isb
++ .endif // \bhb == BHB_MITIGATION_INSN
++
+ .if \kpti == 1
+ /*
+ * Defend against branch aliasing attacks by pushing a dummy
+@@ -749,6 +755,7 @@ SYM_CODE_START_NOALIGN(tramp_vectors)
+ #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
++ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
+ #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
+ SYM_CODE_END(tramp_vectors)
+@@ -811,6 +818,7 @@ SYM_CODE_START(__bp_harden_el1_vectors)
+ #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ generate_el1_vector bhb=BHB_MITIGATION_LOOP
+ generate_el1_vector bhb=BHB_MITIGATION_FW
++ generate_el1_vector bhb=BHB_MITIGATION_INSN
+ #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ SYM_CODE_END(__bp_harden_el1_vectors)
+ .popsection
+--- a/arch/arm64/kernel/image-vars.h
++++ b/arch/arm64/kernel/image-vars.h
+@@ -69,6 +69,7 @@ KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0
+ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
+ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
+ KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
++KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
+
+ /* Global kernel state accessed by nVHE hyp code. */
+ KVM_NVHE_ALIAS(kvm_vgic_global_state);
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -805,6 +805,7 @@ int arch_prctl_spec_ctrl_get(struct task
+ * - Mitigated by a branchy loop a CPU specific number of times, and listed
+ * in our "loop mitigated list".
+ * - Mitigated in software by the firmware Spectre v2 call.
++ * - Has the ClearBHB instruction to perform the mitigation.
+ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
+ * software mitigation in the vectors is needed.
+ * - Has CSV2.3, so is unaffected.
+@@ -820,6 +821,7 @@ enum bhb_mitigation_bits {
+ BHB_LOOP,
+ BHB_FW,
+ BHB_HW,
++ BHB_INSN,
+ };
+ static unsigned long system_bhb_mitigations;
+
+@@ -937,6 +939,9 @@ bool is_spectre_bhb_affected(const struc
+ if (supports_csv2p3(scope))
+ return false;
+
++ if (supports_clearbhb(scope))
++ return true;
++
+ if (spectre_bhb_loop_affected(scope))
+ return true;
+
+@@ -984,6 +989,17 @@ void spectre_bhb_enable_mitigation(const
+ } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
+ state = SPECTRE_MITIGATED;
+ set_bit(BHB_HW, &system_bhb_mitigations);
++ } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
++ /*
++ * Ensure KVM uses the indirect vector which will have ClearBHB
++ * added.
++ */
++ if (!data->slot)
++ data->slot = HYP_VECTOR_INDIRECT;
++
++ this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
++ state = SPECTRE_MITIGATED;
++ set_bit(BHB_INSN, &system_bhb_mitigations);
+ } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+ /*
+ * Ensure KVM uses the indirect vector which will have the
+@@ -1096,3 +1112,16 @@ void noinstr spectre_bhb_patch_wa3(struc
+
+ *updptr++ = cpu_to_le32(insn);
+ }
++
++/* Patched to NOP when not supported */
++void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
++ __le32 *origptr, __le32 *updptr, int nr_inst)
++{
++ BUG_ON(nr_inst != 2);
++
++ if (test_bit(BHB_INSN, &system_bhb_mitigations))
++ return;
++
++ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
++ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
++}
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -213,6 +213,7 @@ SYM_CODE_END(__kvm_hyp_vector)
+ .else
+ stp x0, x1, [sp, #-16]!
+ mitigate_spectre_bhb_loop x0
++ mitigate_spectre_bhb_clear_insn
+ .endif
+ .if \indirect != 0
+ alternative_cb kvm_patch_vector_branch
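
Because ClearBHB sits in the hint space (hint #22, matching the
AARCH64_INSN_HINT_CLEARBHB value added above), CPUs that predate the
instruction execute it as a NOP, which is what lets it appear ahead of any
feature check. A short sketch of the encoding; the macro names here are
invented for the illustration:

/* Sketch: HINT #0 is NOP (0xd503201f) and the 7-bit hint number occupies
 * bits [11:5], so HINT #22, the ClearBHB hint, encodes as 0xd50322df. */
#include <stdint.h>

#define A64_HINT(imm)	(0xd503201fu | ((uint32_t)(imm) << 5))
#define A64_NOP		A64_HINT(0)
#define A64_CLEARBHB	A64_HINT(22)	/* 0x16 << 5 in insn.h terms */
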
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Tue, 16 Nov 2021 15:06:19 +0000
+Subject: KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A
+
+From: James Morse <james.morse@arm.com>
+
+commit 5bdf3437603d4af87f9c7f424b0c8aeed2420745 upstream.
+
+CPUs vulnerable to Spectre-BHB either need to make an SMC-CC firmware
+call from the vectors, or run a sequence of branches. This gets added
+to the hyp vectors. If there is no support for arch-workaround-1 in
+firmware, the indirect vector will be used.
+
+kvm_init_vector_slots() only initialises the two indirect slots if
+the platform is vulnerable to Spectre-v3a. pKVM's hyp_map_vectors()
+only initialises __hyp_bp_vect_base if the platform is vulnerable to
+Spectre-v3a.
+
+As there are about to be more users of the indirect vectors, ensure
+their entries in hyp_spectre_vector_selector[] are always initialised,
+and __hyp_bp_vect_base defaults to the regular VA mapping.
+
+The Spectre-v3a check is moved to a helper
+kvm_system_needs_idmapped_vectors(), and merged with the code
+that creates the hyp mappings.
+
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h | 5 +++++
+ arch/arm64/kvm/arm.c | 5 +----
+ arch/arm64/kvm/hyp/nvhe/mm.c | 4 +++-
+ 3 files changed, 9 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -711,6 +711,11 @@ static inline void kvm_init_host_cpu_con
+ ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
+ }
+
++static inline bool kvm_system_needs_idmapped_vectors(void)
++{
++ return cpus_have_const_cap(ARM64_SPECTRE_V3A);
++}
++
+ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
+
+ static inline void kvm_arch_hardware_unsetup(void) {}
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1458,10 +1458,7 @@ static int kvm_init_vector_slots(void)
+ base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
+ kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
+
+- if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
+- return 0;
+-
+- if (!has_vhe()) {
++ if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
+ err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
+ __BP_HARDEN_HYP_VECS_SZ, &base);
+ if (err)
+--- a/arch/arm64/kvm/hyp/nvhe/mm.c
++++ b/arch/arm64/kvm/hyp/nvhe/mm.c
+@@ -146,8 +146,10 @@ int hyp_map_vectors(void)
+ phys_addr_t phys;
+ void *bp_base;
+
+- if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
++ if (!kvm_system_needs_idmapped_vectors()) {
++ __hyp_bp_vect_base = __bp_harden_hyp_vecs;
+ return 0;
++ }
+
+ phys = __hyp_pa(__bp_harden_hyp_vecs);
+ bp_base = (void *)__pkvm_create_private_mapping(phys,
--- /dev/null
+From foo@baz Tue Mar 8 08:47:19 PM CET 2022
+From: James Morse <james.morse@arm.com>
+Date: Fri, 10 Dec 2021 11:16:18 +0000
+Subject: KVM: arm64: Allow SMCCC_ARCH_WORKAROUND_3 to be discovered and migrated
+
+From: James Morse <james.morse@arm.com>
+
+commit a5905d6af492ee6a4a2205f0d550b3f931b03d03 upstream.
+
+KVM allows the guest to discover whether the ARCH_WORKAROUND SMCCC calls are
+implemented, and to preserve that state during migration through its
+firmware register interface.
+
+Add the necessary boilerplate for SMCCC_ARCH_WORKAROUND_3.
+
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/uapi/asm/kvm.h | 5 +++++
+ arch/arm64/kvm/hypercalls.c | 12 ++++++++++++
+ arch/arm64/kvm/psci.c | 18 +++++++++++++++++-
+ 3 files changed, 34 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/uapi/asm/kvm.h
++++ b/arch/arm64/include/uapi/asm/kvm.h
+@@ -281,6 +281,11 @@ struct kvm_arm_copy_mte_tags {
+ #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
+ #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
+
++#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3)
++#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0
++#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1
++#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2
++
+ /* SVE registers */
+ #define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
+
+--- a/arch/arm64/kvm/hypercalls.c
++++ b/arch/arm64/kvm/hypercalls.c
+@@ -107,6 +107,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu
+ break;
+ }
+ break;
++ case ARM_SMCCC_ARCH_WORKAROUND_3:
++ switch (arm64_get_spectre_bhb_state()) {
++ case SPECTRE_VULNERABLE:
++ break;
++ case SPECTRE_MITIGATED:
++ val[0] = SMCCC_RET_SUCCESS;
++ break;
++ case SPECTRE_UNAFFECTED:
++ val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
++ break;
++ }
++ break;
+ case ARM_SMCCC_HV_PV_TIME_FEATURES:
+ val[0] = SMCCC_RET_SUCCESS;
+ break;
+--- a/arch/arm64/kvm/psci.c
++++ b/arch/arm64/kvm/psci.c
+@@ -406,7 +406,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
+
+ int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+ {
+- return 3; /* PSCI version and two workaround registers */
++ return 4; /* PSCI version and three workaround registers */
+ }
+
+ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+@@ -420,6 +420,9 @@ int kvm_arm_copy_fw_reg_indices(struct k
+ if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
+ return -EFAULT;
+
++ if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++))
++ return -EFAULT;
++
+ return 0;
+ }
+
+@@ -459,6 +462,17 @@ static int get_kernel_wa_level(u64 regid
+ case SPECTRE_VULNERABLE:
+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+ }
++ break;
++ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
++ switch (arm64_get_spectre_bhb_state()) {
++ case SPECTRE_VULNERABLE:
++ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
++ case SPECTRE_MITIGATED:
++ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
++ case SPECTRE_UNAFFECTED:
++ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
++ }
++ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
+ }
+
+ return -EINVAL;
+@@ -475,6 +489,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *
+ break;
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
++ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
+ val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
+ break;
+ default:
+@@ -520,6 +535,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *
+ }
+
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
++ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
+ if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
+ return -EINVAL;
+
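
A guest kernel discovers the new workaround the same way the host probes its
firmware: an SMCCC_ARCH_FEATURES query for ARM_SMCCC_ARCH_WORKAROUND_3, which
KVM now answers from arm64_get_spectre_bhb_state(). A hedged guest-side
sketch, reusing only the arm_smccc_1_1_invoke() pattern already shown in
proton-pack.c above:

/* Sketch of a guest-side probe, mirroring the host's
 * spectre_bhb_get_cpu_fw_mitigation_state(). Returns true when the
 * hypervisor or firmware implements ARCH_WORKAROUND_3, or reports the
 * CPU as unaffected. */
#include <linux/arm-smccc.h>

static bool guest_has_arch_workaround_3(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	return (int)res.a0 == SMCCC_RET_SUCCESS ||
	       (int)res.a0 == SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
}
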
arm-use-loadaddr-to-get-load-address-of-sections.patch
arm-spectre-bhb-workaround.patch
arm-include-unprivileged-bpf-status-in-spectre-v2-reporting.patch
+arm64-add-neoverse-n2-cortex-a710-cpu-part-definition.patch
+arm64-add-hwcap-for-self-synchronising-virtual-counter.patch
+arm64-add-cortex-x2-cpu-part-definition.patch
+arm64-add-id_aa64isar2_el1-sys-register.patch
+arm64-cpufeature-add-hwcap-for-feat_afp.patch
+arm64-cpufeature-add-hwcap-for-feat_rpres.patch
+arm64-entry.s-add-ventry-overflow-sanity-checks.patch
+arm64-spectre-rename-spectre_v4_patch_fw_mitigation_conduit.patch
+kvm-arm64-allow-indirect-vectors-to-be-used-without-spectre_v3a.patch
+arm64-entry-make-the-trampoline-cleanup-optional.patch
+arm64-entry-free-up-another-register-on-kpti-s-tramp_exit-path.patch
+arm64-entry-move-the-trampoline-data-page-before-the-text-page.patch
+arm64-entry-allow-tramp_alias-to-access-symbols-after-the-4k-boundary.patch
+arm64-entry-don-t-assume-tramp_vectors-is-the-start-of-the-vectors.patch
+arm64-entry-move-trampoline-macros-out-of-ifdef-d-section.patch
+arm64-entry-make-the-kpti-trampoline-s-kpti-sequence-optional.patch
+arm64-entry-allow-the-trampoline-text-to-occupy-multiple-pages.patch
+arm64-entry-add-non-kpti-__bp_harden_el1_vectors-for-mitigations.patch
+arm64-entry-add-vectors-that-have-the-bhb-mitigation-sequences.patch
+arm64-entry-add-macro-for-reading-symbol-addresses-from-the-trampoline.patch
+arm64-add-percpu-vectors-for-el1.patch
+arm64-proton-pack-report-spectre-bhb-vulnerabilities-as-part-of-spectre-v2.patch
+arm64-mitigate-spectre-style-branch-history-side-channels.patch
+kvm-arm64-allow-smccc_arch_workaround_3-to-be-discovered-and-migrated.patch
+arm64-use-the-clearbhb-instruction-in-mitigations.patch
+arm64-proton-pack-include-unprivileged-ebpf-status-in-spectre-v2-mitigation-reporting.patch