From: Fuad Tabba
Date: Tue, 9 Sep 2025 07:24:32 +0000 (+0100)
Subject: KVM: arm64: Decouple hyp VM creation state from its handle
X-Git-Tag: v6.18-rc1~95^2~5^2~9^2~4
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=3c45b67625357ac680ee2508493b697cdcd78128;p=thirdparty%2Flinux.git

KVM: arm64: Decouple hyp VM creation state from its handle

Currently, the presence of a pKVM handle (pkvm.handle != 0) is used to
determine if the corresponding hypervisor (EL2) VM has been created and
initialized. This couples the handle's lifecycle with the VM's creation
state.

This coupling will become problematic with upcoming changes that will
allocate the pKVM handle earlier in the VM's life, before the VM is
instantiated at the hypervisor.

To prepare for this and make the state tracking explicit, decouple the
two concepts. Introduce a new boolean flag, 'pkvm.is_created', to track
whether the hypervisor-side VM has been created and initialized. A new
helper, pkvm_hyp_vm_is_created(), is added to check this flag. All call
sites that previously checked for the handle's existence are converted
to use the new, explicit check.

The 'is_created' flag is set to true upon successful creation in the
hypervisor (EL2) and cleared upon destruction.

Signed-off-by: Fuad Tabba
Tested-by: Mark Brown
Signed-off-by: Marc Zyngier
---

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a4289c2f13f5..bc57749e3fb9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -253,6 +253,7 @@ struct kvm_protected_vm {
 	struct kvm_hyp_memcache teardown_mc;
 	struct kvm_hyp_memcache stage2_teardown_mc;
 	bool is_protected;
+	bool is_created;
 };
 
 struct kvm_mpidr_data {
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index ea58282f59bb..08be89c95466 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -18,6 +18,7 @@
 
 int pkvm_init_host_vm(struct kvm *kvm);
 int pkvm_create_hyp_vm(struct kvm *kvm);
+bool pkvm_hyp_vm_is_created(struct kvm *kvm);
 void pkvm_destroy_hyp_vm(struct kvm *kvm);
 int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index abe173406c88..969f6b293234 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -407,6 +407,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
 	hyp_vm->kvm.created_vcpus = nr_vcpus;
 	hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
 	hyp_vm->kvm.arch.pkvm.is_protected = READ_ONCE(host_kvm->arch.pkvm.is_protected);
+	hyp_vm->kvm.arch.pkvm.is_created = true;
 	hyp_vm->kvm.arch.flags = 0;
 	pkvm_init_features_from_host(hyp_vm, host_kvm);
 }
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 7aaeb66e3f39..45d699bba96a 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -87,12 +87,13 @@ void __init kvm_hyp_reserve(void)
 
 static void __pkvm_destroy_hyp_vm(struct kvm *kvm)
 {
-	if (kvm->arch.pkvm.handle) {
+	if (pkvm_hyp_vm_is_created(kvm)) {
 		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
 					  kvm->arch.pkvm.handle));
 	}
 
 	kvm->arch.pkvm.handle = 0;
+	kvm->arch.pkvm.is_created = false;
 	free_hyp_memcache(&kvm->arch.pkvm.teardown_mc);
 	free_hyp_memcache(&kvm->arch.pkvm.stage2_teardown_mc);
 }
@@ -165,6 +166,7 @@ static int __pkvm_create_hyp_vm(struct kvm *kvm)
 		goto free_vm;
 
 	kvm->arch.pkvm.handle = ret;
+	kvm->arch.pkvm.is_created = true;
 	kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
 	kvm_account_pgtable_pages(pgd, pgd_sz / PAGE_SIZE);
 
@@ -176,12 +178,17 @@ free_pgd:
 	return ret;
 }
 
+bool pkvm_hyp_vm_is_created(struct kvm *kvm)
+{
+	return READ_ONCE(kvm->arch.pkvm.is_created);
+}
+
 int pkvm_create_hyp_vm(struct kvm *kvm)
 {
 	int ret = 0;
 
 	mutex_lock(&kvm->arch.config_lock);
-	if (!kvm->arch.pkvm.handle)
+	if (!pkvm_hyp_vm_is_created(kvm))
 		ret = __pkvm_create_hyp_vm(kvm);
 	mutex_unlock(&kvm->arch.config_lock);
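
Not part of the patch: a minimal standalone C sketch of the state model the
diff introduces. The names (toy_pkvm, toy_hyp_vm_is_created, toy_teardown)
are hypothetical stand-ins, not kernel APIs; the point is only that once the
handle can be allocated before the EL2 VM exists, teardown must be gated on
the explicit creation flag rather than on a non-zero handle.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the pKVM per-VM state: handle and creation state
 * are tracked separately, mirroring the new 'is_created' flag. */
struct toy_pkvm {
	unsigned int handle;	/* may be allocated early in a future series */
	bool is_created;	/* set only once the EL2 VM has been created */
};

/* Mirrors the role of pkvm_hyp_vm_is_created(): the explicit check. */
static bool toy_hyp_vm_is_created(const struct toy_pkvm *pkvm)
{
	return pkvm->is_created;
}

static void toy_teardown(struct toy_pkvm *pkvm)
{
	/* Gating on the handle alone would issue a teardown for a VM that
	 * was never instantiated at EL2; the flag avoids that. */
	if (toy_hyp_vm_is_created(pkvm))
		printf("tearing down hyp VM with handle %u\n", pkvm->handle);
	else
		printf("handle %u allocated, no hyp VM to tear down\n",
		       pkvm->handle);

	pkvm->handle = 0;
	pkvm->is_created = false;
}

int main(void)
{
	/* Future case: handle exists before the EL2 VM is created. */
	struct toy_pkvm vm = { .handle = 42, .is_created = false };

	toy_teardown(&vm);	/* no teardown issued */

	vm.handle = 42;
	vm.is_created = true;	/* EL2 VM created */
	toy_teardown(&vm);	/* teardown issued */

	return 0;
}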