]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: GICv3: Detect and work around the lack of ICV_DIR_EL1 trapping
authorMarc Zyngier <maz@kernel.org>
Thu, 20 Nov 2025 17:24:56 +0000 (17:24 +0000)
committerOliver Upton <oupton@kernel.org>
Mon, 24 Nov 2025 22:29:11 +0000 (14:29 -0800)
A long time ago, an unsuspecting architect forgot to add a trap
bit for ICV_DIR_EL1 in ICH_HCR_EL2. Which was unfortunate, but
what's a bit of spec between friends? Thankfully, this was fixed
in a later revision, and ARM "deprecates" the lack of trapping
ability.

Unfortunately, a few (billion) CPUs went out with that defect,
anything ARMv8.0 from ARM, give or take. And on these CPUs,
you can't trap DIR on its own, full stop.

As the next best thing, we can trap everything in the common group,
which is a tad expensive, but hey ho, that's what you get. You can
otherwise recycle the HW in the nearby bin.

Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://msgid.link/20251120172540.2267180-7-maz@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>
arch/arm64/include/asm/virt.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/hyp-stub.S
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/tools/cpucaps

index aa280f356b96a4f62d2b8f89cf8fbfae5249c271..8eb63d3294974cce8140a61bc6fa92ce0bdb8a7b 100644 (file)
  */
 #define HVC_FINALISE_EL2       3
 
+/*
+ * HVC_GET_ICH_VTR_EL2 - Retrieve the ICH_VTR_EL2 value
+ */
+#define HVC_GET_ICH_VTR_EL2    4
+
 /* Max number of HYP stub hypercalls */
-#define HVC_STUB_HCALL_NR 4
+#define HVC_STUB_HCALL_NR 5
 
 /* Error returned when an invalid stub number is passed into x0 */
 #define HVC_STUB_ERR   0xbadca11
index 5ed401ff79e3e388b63825acbd156546cf91e103..5de51cb1b8fe21313023e5b03f7405eb113cbd87 100644 (file)
@@ -2303,6 +2303,49 @@ static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry
 }
 #endif
 
+static bool can_trap_icv_dir_el1(const struct arm64_cpu_capabilities *entry,
+                                int scope)
+{
+       static const struct midr_range has_vgic_v3[] = {
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
+               MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
+               {},
+       };
+       struct arm_smccc_res res = {};
+
+       BUILD_BUG_ON(ARM64_HAS_ICH_HCR_EL2_TDIR <= ARM64_HAS_GICV3_CPUIF);
+       BUILD_BUG_ON(ARM64_HAS_ICH_HCR_EL2_TDIR <= ARM64_HAS_GICV5_LEGACY);
+       if (!cpus_have_cap(ARM64_HAS_GICV3_CPUIF) &&
+           !is_midr_in_range_list(has_vgic_v3))
+               return false;
+
+       if (!is_hyp_mode_available())
+               return false;
+
+       if (cpus_have_cap(ARM64_HAS_GICV5_LEGACY))
+               return true;
+
+       if (is_kernel_in_hyp_mode())
+               res.a1 = read_sysreg_s(SYS_ICH_VTR_EL2);
+       else
+               arm_smccc_1_1_hvc(HVC_GET_ICH_VTR_EL2, &res);
+
+       if (res.a0 == HVC_STUB_ERR)
+               return false;
+
+       return res.a1 & ICH_VTR_EL2_TDS;
+}
+
 #ifdef CONFIG_ARM64_BTI
 static void bti_enable(const struct arm64_cpu_capabilities *__unused)
 {
@@ -2814,6 +2857,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .matches = has_gic_prio_relaxed_sync,
        },
 #endif
+       {
+               /*
+                * Depends on having GICv3
+                */
+               .desc = "ICV_DIR_EL1 trapping",
+               .capability = ARM64_HAS_ICH_HCR_EL2_TDIR,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = can_trap_icv_dir_el1,
+       },
 #ifdef CONFIG_ARM64_E0PD
        {
                .desc = "E0PD",
index 36e2d26b54f5c2225c72caf75a2da96bc39d282f..085bc9972f6bb8c5f569975c98c783b9329f9649 100644 (file)
@@ -54,6 +54,11 @@ SYM_CODE_START_LOCAL(elx_sync)
 1:     cmp     x0, #HVC_FINALISE_EL2
        b.eq    __finalise_el2
 
+       cmp     x0, #HVC_GET_ICH_VTR_EL2
+       b.ne    2f
+       mrs_s   x1, SYS_ICH_VTR_EL2
+       b       9f
+
 2:     cmp     x0, #HVC_SOFT_RESTART
        b.ne    3f
        mov     x0, x2
index 8c14945086821308325cd68ab3693b1637d1c068..1b6c3071ec80f2d5a817a656b2494fd7f522cf84 100644 (file)
@@ -648,6 +648,9 @@ void noinstr kvm_compute_ich_hcr_trap_bits(struct alt_instr *alt,
                dir_trap = true;
        }
 
+       if (!cpus_have_cap(ARM64_HAS_ICH_HCR_EL2_TDIR))
+               common_trap = true;
+
        if (group0_trap)
                hcr |= ICH_HCR_EL2_TALL0;
        if (group1_trap)
index 1b32c1232d28d86e44f99e3bde3d118624d33aec..116d1a7b688cbf41fcb9c52658262d8201872b95 100644 (file)
@@ -40,6 +40,7 @@ HAS_GICV5_CPUIF
 HAS_GICV5_LEGACY
 HAS_GIC_PRIO_MASKING
 HAS_GIC_PRIO_RELAXED_SYNC
+HAS_ICH_HCR_EL2_TDIR
 HAS_HCR_NV1
 HAS_HCX
 HAS_LDAPR