git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: Rename 'host_kvm' to 'kvm' in pKVM host code
author: Fuad Tabba <tabba@google.com>
Tue, 9 Sep 2025 07:24:30 +0000 (08:24 +0100)
committer: Marc Zyngier <maz@kernel.org>
Mon, 15 Sep 2025 09:46:55 +0000 (10:46 +0100)
In hypervisor (EL2) code, it is important to distinguish between the
host's 'struct kvm' and a protected VM's 'struct kvm'. Using 'host_kvm'
as a variable name in that context makes this distinction clear.

However, in the host kernel code (EL1), there is no such ambiguity. The
code is only ever concerned with the host's own 'struct kvm' instance.
The 'host_' prefix is therefore redundant and adds unnecessary
verbosity.

Simplify the code by renaming the 'host_kvm' parameter to 'kvm' in all
functions within host-side kernel code (EL1). This improves readability
and makes the naming consistent with other host-side kernel code.

No functional change intended.

Signed-off-by: Fuad Tabba <tabba@google.com>
Tested-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/pkvm.c

index fcd70bfe44fb8cce247900c77721a202b46563d8..7aaeb66e3f394f96ddf779e053ad4456f2ad07ae 100644 (file)
@@ -85,16 +85,16 @@ void __init kvm_hyp_reserve(void)
                 hyp_mem_base);
 }
 
-static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+static void __pkvm_destroy_hyp_vm(struct kvm *kvm)
 {
-       if (host_kvm->arch.pkvm.handle) {
+       if (kvm->arch.pkvm.handle) {
                WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
-                                         host_kvm->arch.pkvm.handle));
+                                         kvm->arch.pkvm.handle));
        }
 
-       host_kvm->arch.pkvm.handle = 0;
-       free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
-       free_hyp_memcache(&host_kvm->arch.pkvm.stage2_teardown_mc);
+       kvm->arch.pkvm.handle = 0;
+       free_hyp_memcache(&kvm->arch.pkvm.teardown_mc);
+       free_hyp_memcache(&kvm->arch.pkvm.stage2_teardown_mc);
 }
 
 static int __pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu)
@@ -129,16 +129,16 @@ static int __pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu)
  *
  * Return 0 on success, negative error code on failure.
  */
-static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
+static int __pkvm_create_hyp_vm(struct kvm *kvm)
 {
        size_t pgd_sz, hyp_vm_sz;
        void *pgd, *hyp_vm;
        int ret;
 
-       if (host_kvm->created_vcpus < 1)
+       if (kvm->created_vcpus < 1)
                return -EINVAL;
 
-       pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.mmu.vtcr);
+       pgd_sz = kvm_pgtable_stage2_pgd_size(kvm->arch.mmu.vtcr);
 
        /*
         * The PGD pages will be reclaimed using a hyp_memcache which implies
@@ -152,7 +152,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
        /* Allocate memory to donate to hyp for vm and vcpu pointers. */
        hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
                                        size_mul(sizeof(void *),
-                                                host_kvm->created_vcpus)));
+                                                kvm->created_vcpus)));
        hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT);
        if (!hyp_vm) {
                ret = -ENOMEM;
@@ -160,12 +160,12 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
        }
 
        /* Donate the VM memory to hyp and let hyp initialize it. */
-       ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd);
+       ret = kvm_call_hyp_nvhe(__pkvm_init_vm, kvm, hyp_vm, pgd);
        if (ret < 0)
                goto free_vm;
 
-       host_kvm->arch.pkvm.handle = ret;
-       host_kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
+       kvm->arch.pkvm.handle = ret;
+       kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
        kvm_account_pgtable_pages(pgd, pgd_sz / PAGE_SIZE);
 
        return 0;
@@ -176,14 +176,14 @@ free_pgd:
        return ret;
 }
 
-int pkvm_create_hyp_vm(struct kvm *host_kvm)
+int pkvm_create_hyp_vm(struct kvm *kvm)
 {
        int ret = 0;
 
-       mutex_lock(&host_kvm->arch.config_lock);
-       if (!host_kvm->arch.pkvm.handle)
-               ret = __pkvm_create_hyp_vm(host_kvm);
-       mutex_unlock(&host_kvm->arch.config_lock);
+       mutex_lock(&kvm->arch.config_lock);
+       if (!kvm->arch.pkvm.handle)
+               ret = __pkvm_create_hyp_vm(kvm);
+       mutex_unlock(&kvm->arch.config_lock);
 
        return ret;
 }
@@ -200,14 +200,14 @@ int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu)
        return ret;
 }
 
-void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+void pkvm_destroy_hyp_vm(struct kvm *kvm)
 {
-       mutex_lock(&host_kvm->arch.config_lock);
-       __pkvm_destroy_hyp_vm(host_kvm);
-       mutex_unlock(&host_kvm->arch.config_lock);
+       mutex_lock(&kvm->arch.config_lock);
+       __pkvm_destroy_hyp_vm(kvm);
+       mutex_unlock(&kvm->arch.config_lock);
 }
 
-int pkvm_init_host_vm(struct kvm *host_kvm)
+int pkvm_init_host_vm(struct kvm *kvm)
 {
        return 0;
 }