git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: arm64: Work around C1-Pro erratum 4193714 for protected guests
author: James Morse <james.morse@arm.com>
Tue, 5 May 2026 16:52:03 +0000 (17:52 +0100)
committer: Marc Zyngier <maz@kernel.org>
Wed, 6 May 2026 16:08:39 +0000 (17:08 +0100)
C1-Pro cores with SME have an erratum where TLBI+DSB does not complete
all outstanding SME accesses. Instead a DSB needs to be executed on the
affected CPUs. The implication is that pages cannot be unmapped from the
host Stage 2 and then provided to a protected guest or to the
hypervisor. Host SME accesses may still complete after this point.

This erratum breaks pKVM's guarantees, and the workaround is hard to
implement as EL2 and EL1 share a security state, meaning EL1 can mask
IPIs sent by EL2, leading to interrupt blackouts.

Instead, do this in EL3. This has the advantage of a separate security
state, meaning a lower EL cannot mask the IPI. It is also simpler for
EL3 to know about CPUs that are off or in PSCI's CPU_SUSPEND.

Add the needed hook to host_stage2_set_owner_metadata_locked(). This
covers the cases where the host loses access to a page:

  __pkvm_host_donate_guest()
  __pkvm_guest_unshare_host()
  host_stage2_set_owner_locked() when owner_id == PKVM_ID_HYP

Since pKVM relies on the firmware call for correctness, check for the
firmware counterpart during protected KVM initialisation and fail the
pKVM initialisation if it is missing.

Signed-off-by: James Morse <james.morse@arm.com>
Co-developed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oupton@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Vincent Donnefort <vdonnefort@google.com>
Cc: Lorenzo Pieralisi <lpieralisi@kernel.org>
Cc: Sudeep Holla <sudeep.holla@kernel.org>
Link: https://patch.msgid.link/20260505165205.2690919-1-catalin.marinas@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
include/linux/arm-smccc.h

index 8bb2c7422cc8b0ad1c6952047cbb878af57fcaca..34c9950884d5e6c8292cd722df3d59397ac526f0 100644 (file)
@@ -4,6 +4,7 @@
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/bug.h>
 #include <linux/cpu_pm.h>
 #include <linux/errno.h>
@@ -2638,6 +2639,22 @@ static int init_pkvm_host_sve_state(void)
        return 0;
 }
 
+static int pkvm_check_sme_dvmsync_fw_call(void)
+{
+       struct arm_smccc_res res;
+
+       if (!cpus_have_final_cap(ARM64_WORKAROUND_4193714))
+               return 0;
+
+       arm_smccc_1_1_smc(ARM_SMCCC_CPU_WORKAROUND_4193714, &res);
+       if (res.a0) {
+               kvm_err("pKVM requires firmware support for C1-Pro erratum 4193714\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
 /*
  * Finalizes the initialization of hyp mode, once everything else is initialized
  * and the initialziation process cannot fail.
@@ -2838,6 +2855,10 @@ static int __init init_hyp_mode(void)
                if (err)
                        goto out_err;
 
+               err = pkvm_check_sme_dvmsync_fw_call();
+               if (err)
+                       goto out_err;
+
                err = kvm_hyp_init_protection(hyp_va_bits);
                if (err) {
                        kvm_err("Failed to init hyp memory protection\n");
index 28a471d1927cd50f908ea0dc14b8f583700d1cd8..a3050e2b65b13d2399abe67d7dc264578951dd81 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <linux/kvm_host.h>
+
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
@@ -14,6 +15,7 @@
 
 #include <hyp/fault.h>
 
+#include <nvhe/arm-smccc.h>
 #include <nvhe/gfp.h>
 #include <nvhe/memory.h>
 #include <nvhe/mem_protect.h>
@@ -29,6 +31,19 @@ static struct hyp_pool host_s2_pool;
 static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
 #define current_vm (*this_cpu_ptr(&__current_vm))
 
+static void pkvm_sme_dvmsync_fw_call(void)
+{
+       if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714)) {
+               struct arm_smccc_res res;
+
+               /*
+                * Ignore the return value. Probing for the workaround
+                * availability took place in init_hyp_mode().
+                */
+               hyp_smccc_1_1_smc(ARM_SMCCC_CPU_WORKAROUND_4193714, &res);
+       }
+}
+
 static void guest_lock_component(struct pkvm_hyp_vm *vm)
 {
        hyp_spin_lock(&vm->lock);
@@ -574,8 +589,14 @@ static int host_stage2_set_owner_metadata_locked(phys_addr_t addr, u64 size,
        ret = host_stage2_try(kvm_pgtable_stage2_annotate, &host_mmu.pgt,
                              addr, size, &host_s2_pool,
                              KVM_HOST_INVALID_PTE_TYPE_DONATION, annotation);
-       if (!ret)
+       if (!ret) {
+               /*
+                * After stage2 maintenance has happened, but before the page
+                * owner has changed.
+                */
+               pkvm_sme_dvmsync_fw_call();
                __host_update_page_state(addr, size, PKVM_NOPAGE);
+       }
 
        return ret;
 }
index 50b47eba7d01520e8d27552cd8249e7796d227fc..e7195750d21bb41eb4c42a4e26635b2c2504aa96 100644 (file)
                           ARM_SMCCC_SMC_32,                            \
                           0, 0x3fff)
 
+/* C1-Pro erratum 4193714: SME DVMSync early acknowledgement */
+#define ARM_SMCCC_CPU_WORKAROUND_4193714                               \
+       ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
+                          ARM_SMCCC_SMC_32,                            \
+                          ARM_SMCCC_OWNER_CPU, 0x10)
+
 #define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID                          \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
                           ARM_SMCCC_SMC_32,                            \