hyp_mem_base);
}
-static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+static void __pkvm_destroy_hyp_vm(struct kvm *kvm)
{
- if (host_kvm->arch.pkvm.handle) {
+ if (kvm->arch.pkvm.handle) {
WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
- host_kvm->arch.pkvm.handle));
+ kvm->arch.pkvm.handle));
}
- host_kvm->arch.pkvm.handle = 0;
- free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
- free_hyp_memcache(&host_kvm->arch.pkvm.stage2_teardown_mc);
+ kvm->arch.pkvm.handle = 0;
+ free_hyp_memcache(&kvm->arch.pkvm.teardown_mc);
+ free_hyp_memcache(&kvm->arch.pkvm.stage2_teardown_mc);
}
/*
 * Allocates and donates memory for hypervisor VM structs at EL2.
 *
 * Allocates space for the VM state, which includes the hyp vm as well as
 * the hyp vcpus.
 *
 * Stores an opaque handler in the kvm struct for future reference.
 *
 * Return 0 on success, negative error code on failure.
 */
-static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
+static int __pkvm_create_hyp_vm(struct kvm *kvm)
{
size_t pgd_sz, hyp_vm_sz;
void *pgd, *hyp_vm;
int ret;
- if (host_kvm->created_vcpus < 1)
+ if (kvm->created_vcpus < 1)
return -EINVAL;
- pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.mmu.vtcr);
+ pgd_sz = kvm_pgtable_stage2_pgd_size(kvm->arch.mmu.vtcr);
/*
 * The PGD pages will be reclaimed using a hyp_memcache which implies
 * page granularity. So, use alloc_pages_exact() to get individual
 * refcounts.
 */
pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT);
if (!pgd)
return -ENOMEM;
/* Allocate memory to donate to hyp for vm and vcpu pointers. */
hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
size_mul(sizeof(void *),
- host_kvm->created_vcpus)));
+ kvm->created_vcpus)));
hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT);
if (!hyp_vm) {
ret = -ENOMEM;
goto free_pgd;
}
/* Donate the VM memory to hyp and let hyp initialize it. */
- ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd);
+ ret = kvm_call_hyp_nvhe(__pkvm_init_vm, kvm, hyp_vm, pgd);
if (ret < 0)
goto free_vm;
- host_kvm->arch.pkvm.handle = ret;
- host_kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
+ kvm->arch.pkvm.handle = ret;
+ kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
kvm_account_pgtable_pages(pgd, pgd_sz / PAGE_SIZE);
return 0;
free_vm:
free_pages_exact(hyp_vm, hyp_vm_sz);
free_pgd:
free_pages_exact(pgd, pgd_sz);
return ret;
}
-int pkvm_create_hyp_vm(struct kvm *host_kvm)
+int pkvm_create_hyp_vm(struct kvm *kvm)
{
int ret = 0;
- mutex_lock(&host_kvm->arch.config_lock);
- if (!host_kvm->arch.pkvm.handle)
- ret = __pkvm_create_hyp_vm(host_kvm);
- mutex_unlock(&host_kvm->arch.config_lock);
+ mutex_lock(&kvm->arch.config_lock);
+ if (!kvm->arch.pkvm.handle)
+ ret = __pkvm_create_hyp_vm(kvm);
+ mutex_unlock(&kvm->arch.config_lock);
return ret;
}
-void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+void pkvm_destroy_hyp_vm(struct kvm *kvm)
{
- mutex_lock(&host_kvm->arch.config_lock);
- __pkvm_destroy_hyp_vm(host_kvm);
- mutex_unlock(&host_kvm->arch.config_lock);
+ mutex_lock(&kvm->arch.config_lock);
+ __pkvm_destroy_hyp_vm(kvm);
+ mutex_unlock(&kvm->arch.config_lock);
}
-int pkvm_init_host_vm(struct kvm *host_kvm)
+int pkvm_init_host_vm(struct kvm *kvm)
{
return 0;
}
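
For illustration only, and not part of the patch: a minimal caller-side sketch of the lifecycle these functions implement, using the post-rename signatures above. The wrapper function example_pkvm_lifecycle is hypothetical; in the kernel these entry points are invoked from separate VM lifecycle paths (VM init, first use, and teardown), not back to back as shown here.

/* Hypothetical sketch, not kernel code: drives the pKVM VM lifecycle. */
static int example_pkvm_lifecycle(struct kvm *kvm)
{
	int ret;

	/* Host-side bookkeeping when the VM is created. */
	ret = pkvm_init_host_vm(kvm);
	if (ret)
		return ret;

	/*
	 * Donate VM state to EL2 and record the opaque handle. The wrapper
	 * takes arch.config_lock and is a no-op if the handle already exists.
	 */
	ret = pkvm_create_hyp_vm(kvm);
	if (ret)
		return ret;

	/* Tear down the EL2 state and reclaim the teardown memcaches. */
	pkvm_destroy_hyp_vm(kvm);
	return 0;
}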