KVM: arm64: Hook up donation hypercall to pkvm_pgtable_stage2_map()
author Will Deacon <will@kernel.org>
Mon, 30 Mar 2026 14:48:14 +0000 (15:48 +0100)
committer Marc Zyngier <maz@kernel.org>
Mon, 30 Mar 2026 15:58:08 +0000 (16:58 +0100)
Mapping pages into a protected guest requires the donation of memory
from the host.

Extend pkvm_pgtable_stage2_map() to issue a donate hypercall when the
target VM is protected. Since the hypercall only handles a single page,
the splitting logic used for the share path is not required.
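Condensed from the diff below, the resulting dispatch in pkvm_pgtable_stage2_map() looks
roughly as follows. This is only a reading sketch of the new flow; error handling and the
unshare-on-overlap handling of the share path are elided:

        if (kvm_vm_is_protected(kvm)) {
                /* Protected guests take page-granular RWX mappings only */
                if (size != PAGE_SIZE || prot != KVM_PGTABLE_PROT_RWX)
                        return -EINVAL;
                if (mapping)    /* raced with another vCPU */
                        return -EAGAIN;
                /* Single-page donation, so no splitting logic is needed */
                ret = kvm_call_hyp_nvhe(__pkvm_host_donate_guest, pfn, gfn);
        } else {
                /* Non-protected guests keep the existing share path */
                ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn,
                                        size / PAGE_SIZE, prot);
        }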

Tested-by: Fuad Tabba <tabba@google.com>
Tested-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20260330144841.26181-14-will@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/pkvm.c

index ea7f267ee7ad3b03b418dad74b422814a0ff6ee7..7d0fe36fd8dc60e36b17c958f535b141c08a1e2d 100644 (file)
@@ -379,31 +379,55 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
        struct kvm_hyp_memcache *cache = mc;
        u64 gfn = addr >> PAGE_SHIFT;
        u64 pfn = phys >> PAGE_SHIFT;
+       u64 end = addr + size;
        int ret;
 
-       if (size != PAGE_SIZE && size != PMD_SIZE)
-               return -EINVAL;
-
        lockdep_assert_held_write(&kvm->mmu_lock);
+       mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, end - 1);
 
-       /*
-        * Calling stage2_map() on top of existing mappings is either happening because of a race
-        * with another vCPU, or because we're changing between page and block mappings. As per
-        * user_mem_abort(), same-size permission faults are handled in the relax_perms() path.
-        */
-       mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);
-       if (mapping) {
-               if (size == (mapping->nr_pages * PAGE_SIZE))
+       if (kvm_vm_is_protected(kvm)) {
+               /* Protected VMs are mapped using RWX page-granular mappings */
+               if (WARN_ON_ONCE(size != PAGE_SIZE))
+                       return -EINVAL;
+
+               if (WARN_ON_ONCE(prot != KVM_PGTABLE_PROT_RWX))
+                       return -EINVAL;
+
+               /*
+                * We raced with another vCPU.
+                */
+               if (mapping)
                        return -EAGAIN;
 
-               /* Remove _any_ pkvm_mapping overlapping with the range, bigger or smaller. */
-               ret = __pkvm_pgtable_stage2_unshare(pgt, addr, addr + size);
-               if (ret)
-                       return ret;
-               mapping = NULL;
+               ret = kvm_call_hyp_nvhe(__pkvm_host_donate_guest, pfn, gfn);
+       } else {
+               if (WARN_ON_ONCE(size != PAGE_SIZE && size != PMD_SIZE))
+                       return -EINVAL;
+
+               /*
+                * We either raced with another vCPU or we're changing between
+                * page and block mappings. As per user_mem_abort(), same-size
+                * permission faults are handled in the relax_perms() path.
+                */
+               if (mapping) {
+                       if (size == (mapping->nr_pages * PAGE_SIZE))
+                               return -EAGAIN;
+
+                       /*
+                        * Remove _any_ pkvm_mapping overlapping with the range,
+                        * bigger or smaller.
+                        */
+                       ret = __pkvm_pgtable_stage2_unshare(pgt, addr, end);
+                       if (ret)
+                               return ret;
+
+                       mapping = NULL;
+               }
+
+               ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn,
+                                       size / PAGE_SIZE, prot);
        }
 
-       ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot);
        if (WARN_ON(ret))
                return ret;