git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: Reserve pKVM handle during pkvm_init_host_vm()
author: Fuad Tabba <tabba@google.com>
Tue, 9 Sep 2025 07:24:36 +0000 (08:24 +0100)
committer: Marc Zyngier <maz@kernel.org>
Mon, 15 Sep 2025 09:46:55 +0000 (10:46 +0100)
When a pKVM guest is active, TLB invalidations triggered by host MMU
notifiers require a valid hypervisor handle. Currently, this handle is
only allocated when the first vCPU is run.

However, the guest's memory is associated with the host MMU much
earlier, during kvm_arch_init_vm(). This creates a window where an MMU
invalidation could occur after the kvm_pgtable pointer checked by the
notifiers is set but before the pKVM handle has been created.

Fix this by reserving the pKVM handle when the host VM is first set up.
Move the call to the __pkvm_reserve_vm hypercall from the first-vCPU-run
path into pkvm_init_host_vm(), which is called during initial VM setup.
This ensures the handle is available before any subsystem can trigger an
MMU notification for the VM.

The VM destruction path is updated to call __pkvm_unreserve_vm for cases
where a VM was reserved but never fully created at the hypervisor,
ensuring the handle is properly released.

This fix leverages the two-stage reservation/initialization hypercall
interface introduced in preceding patches.

Signed-off-by: Fuad Tabba <tabba@google.com>
Tested-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/arm.c
arch/arm64/kvm/pkvm.c

index 888f7c7abf547a011331082a23f5799b1f6fab73..1849bdede4f2b8b1c389e6a15dc8c0f1f971d26f 100644 (file)
@@ -170,10 +170,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (ret)
                return ret;
 
-       ret = pkvm_init_host_vm(kvm);
-       if (ret)
-               goto err_unshare_kvm;
-
        if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
                ret = -ENOMEM;
                goto err_unshare_kvm;
@@ -184,6 +180,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (ret)
                goto err_free_cpumask;
 
+       if (is_protected_kvm_enabled()) {
+               /*
+                * If any failures occur after this is successful, make sure to
+                * call __pkvm_unreserve_vm to unreserve the VM in hyp.
+                */
+               ret = pkvm_init_host_vm(kvm);
+               if (ret)
+                       goto err_free_cpumask;
+       }
+
        kvm_vgic_early_init(kvm);
 
        kvm_timer_init_vm(kvm);
index 082bc15f436cd68b56762593bf36c0d1555a2897..24f0f8a8c943cc9b70d88220927b6623c6251f27 100644 (file)
@@ -90,6 +90,12 @@ static void __pkvm_destroy_hyp_vm(struct kvm *kvm)
        if (pkvm_hyp_vm_is_created(kvm)) {
                WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
                                          kvm->arch.pkvm.handle));
+       } else if (kvm->arch.pkvm.handle) {
+               /*
+                * The VM could have been reserved but hyp initialization has
+                * failed. Make sure to unreserve it.
+                */
+               kvm_call_hyp_nvhe(__pkvm_unreserve_vm, kvm->arch.pkvm.handle);
        }
 
        kvm->arch.pkvm.handle = 0;
@@ -160,25 +166,16 @@ static int __pkvm_create_hyp_vm(struct kvm *kvm)
                goto free_pgd;
        }
 
-       /* Reserve the VM in hyp and obtain a hyp handle for the VM. */
-       ret = kvm_call_hyp_nvhe(__pkvm_reserve_vm);
-       if (ret < 0)
-               goto free_vm;
-
-       kvm->arch.pkvm.handle = ret;
-
        /* Donate the VM memory to hyp and let hyp initialize it. */
        ret = kvm_call_hyp_nvhe(__pkvm_init_vm, kvm, hyp_vm, pgd);
        if (ret)
-               goto unreserve_vm;
+               goto free_vm;
 
        kvm->arch.pkvm.is_created = true;
        kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
        kvm_account_pgtable_pages(pgd, pgd_sz / PAGE_SIZE);
 
        return 0;
-unreserve_vm:
-       kvm_call_hyp_nvhe(__pkvm_unreserve_vm, kvm->arch.pkvm.handle);
 free_vm:
        free_pages_exact(hyp_vm, hyp_vm_sz);
 free_pgd:
@@ -224,6 +221,22 @@ void pkvm_destroy_hyp_vm(struct kvm *kvm)
 
 int pkvm_init_host_vm(struct kvm *kvm)
 {
+       int ret;
+
+       if (pkvm_hyp_vm_is_created(kvm))
+               return -EINVAL;
+
+       /* VM is already reserved, no need to proceed. */
+       if (kvm->arch.pkvm.handle)
+               return 0;
+
+       /* Reserve the VM in hyp and obtain a hyp handle for the VM. */
+       ret = kvm_call_hyp_nvhe(__pkvm_reserve_vm);
+       if (ret < 0)
+               return ret;
+
+       kvm->arch.pkvm.handle = ret;
+
        return 0;
 }