git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: arm64: Separate allocation and insertion of pKVM VM table entries
author: Fuad Tabba <tabba@google.com>
Tue, 9 Sep 2025 07:24:33 +0000 (08:24 +0100)
committer: Marc Zyngier <maz@kernel.org>
Mon, 15 Sep 2025 09:46:55 +0000 (10:46 +0100)
The current insert_vm_table_entry() function performs two actions at
once: it finds a free slot in the pKVM VM table and populates it with
the pkvm_hyp_vm pointer.

Refactor this function as a preparatory step for future work that will
require reserving a VM slot and its corresponding handle earlier in the
VM lifecycle, before the pkvm_hyp_vm structure is initialized and ready
to be inserted.

Split the function into a two-phase process:

- A new allocate_vm_table_entry() function finds an empty slot, marks it
  as reserved with a RESERVED_ENTRY placeholder, and returns a handle
  derived from the slot's index.

- The insert_vm_table_entry() function is repurposed to take the handle,
  validate that the corresponding slot is in the reserved state, and
  then populate it with the pkvm_hyp_vm pointer.

Signed-off-by: Fuad Tabba <tabba@google.com>
Tested-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/nvhe/pkvm.c

index 969f6b293234df9a2e81e459a1124c7edd36ab53..64b760d30d0583a591e23bc7a6ae608be5c74812 100644 (file)
@@ -192,6 +192,11 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
  */
 #define HANDLE_OFFSET 0x1000
 
+/*
+ * Marks a reserved but not yet used entry in the VM table.
+ */
+#define RESERVED_ENTRY ((void *)0xa110ca7ed)
+
 static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
 {
        return handle - HANDLE_OFFSET;
@@ -231,6 +236,10 @@ static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
        if (unlikely(idx >= KVM_MAX_PVMS))
                return NULL;
 
+       /* A reserved entry doesn't represent an initialized VM. */
+       if (unlikely(vm_table[idx] == RESERVED_ENTRY))
+               return NULL;
+
        return vm_table[idx];
 }
 
@@ -481,7 +490,7 @@ done:
        return ret;
 }
 
-static int find_free_vm_table_entry(struct kvm *host_kvm)
+static int find_free_vm_table_entry(void)
 {
        int i;
 
@@ -494,15 +503,13 @@ static int find_free_vm_table_entry(struct kvm *host_kvm)
 }
 
 /*
- * Allocate a VM table entry and insert a pointer to the new vm.
+ * Reserve a VM table entry.
  *
  * Return a unique handle to the VM on success,
  * negative error code on failure.
  */
-static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
-                                          struct pkvm_hyp_vm *hyp_vm)
+static int allocate_vm_table_entry(void)
 {
-       struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
        int idx;
 
        hyp_assert_lock_held(&vm_table_lock);
@@ -515,10 +522,30 @@ static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
        if (unlikely(!vm_table))
                return -EINVAL;
 
-       idx = find_free_vm_table_entry(host_kvm);
-       if (idx < 0)
+       idx = find_free_vm_table_entry();
+       if (unlikely(idx < 0))
                return idx;
 
+       vm_table[idx] = RESERVED_ENTRY;
+
+       return idx;
+}
+
+/*
+ * Insert a pointer to the new VM into the VM table.
+ *
+ * Return 0 on success, or negative error code on failure.
+ */
+static int insert_vm_table_entry(struct kvm *host_kvm,
+                                struct pkvm_hyp_vm *hyp_vm,
+                                pkvm_handle_t handle)
+{
+       struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
+       unsigned int idx;
+
+       hyp_assert_lock_held(&vm_table_lock);
+
+       idx = vm_handle_to_idx(handle);
        hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);
 
        /* VMID 0 is reserved for the host */
@@ -528,7 +555,7 @@ static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
        mmu->pgt = &hyp_vm->pgt;
 
        vm_table[idx] = hyp_vm;
-       return hyp_vm->kvm.arch.pkvm.handle;
+       return 0;
 }
 
 /*
@@ -614,6 +641,7 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
        struct pkvm_hyp_vm *hyp_vm = NULL;
        size_t vm_size, pgd_size;
        unsigned int nr_vcpus;
+       pkvm_handle_t handle;
        void *pgd = NULL;
        int ret;
 
@@ -643,10 +671,16 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
        init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);
 
        hyp_spin_lock(&vm_table_lock);
-       ret = insert_vm_table_entry(host_kvm, hyp_vm);
+       ret = allocate_vm_table_entry();
        if (ret < 0)
                goto err_unlock;
 
+       handle = idx_to_vm_handle(ret);
+
+       ret = insert_vm_table_entry(host_kvm, hyp_vm, handle);
+       if (ret)
+               goto err_unlock;
+
        ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
        if (ret)
                goto err_remove_vm_table_entry;