KVM: arm64: Hook up reclaim hypercall to pkvm_pgtable_stage2_destroy()
author    Will Deacon <will@kernel.org>
          Mon, 30 Mar 2026 14:48:17 +0000 (15:48 +0100)
committer Marc Zyngier <maz@kernel.org>
          Mon, 30 Mar 2026 15:58:08 +0000 (16:58 +0100)
During teardown of a protected guest, its memory pages must be reclaimed
from the hypervisor by issuing the '__pkvm_reclaim_dying_guest_page'
hypercall.
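As a rough sketch of that interaction (the handle and gfn arguments mirror
the patch below; the standalone wrapper itself is purely illustrative), the
host issues one such hypercall per guest frame:

    /*
     * Illustrative wrapper only: ask the hypervisor to hand back a single
     * page belonging to a dying protected guest, identified by its gfn.
     */
    static int reclaim_one_dying_page(pkvm_handle_t handle, u64 gfn)
    {
            return kvm_call_hyp_nvhe(__pkvm_reclaim_dying_guest_page,
                                     handle, gfn);
    }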

Add a new helper, __pkvm_pgtable_stage2_reclaim(), which is called
during the VM teardown operation to reclaim pages from the hypervisor
and drop the GUP pin on the host.
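Once a page has been handed back, the host-side cleanup is effectively the
reverse of the pinning path: drop the single GUP pin (marking the page
dirty) and subtract the page from the mm's locked-vm accounting. A
condensed per-page sketch, taken from the helper in the diff below:

    /* Condensed per-page cleanup after a successful reclaim hypercall. */
    struct page *page = pfn_to_page(mapping->pfn);

    unpin_user_pages_dirty_lock(&page, 1, true);    /* drop the GUP pin */
    account_locked_vm(current->mm, 1, false);       /* uncharge one page */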

Tested-by: Fuad Tabba <tabba@google.com>
Tested-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20260330144841.26181-17-will@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/pkvm.c

index 7d0fe36fd8dc60e36b17c958f535b141c08a1e2d..3cf23496f2257315312524ccda94528ceee795df 100644 (file)
@@ -328,6 +328,32 @@ int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
        return 0;
 }
 
+static int __pkvm_pgtable_stage2_reclaim(struct kvm_pgtable *pgt, u64 start, u64 end)
+{
+       struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
+       pkvm_handle_t handle = kvm->arch.pkvm.handle;
+       struct pkvm_mapping *mapping;
+       int ret;
+
+       for_each_mapping_in_range_safe(pgt, start, end, mapping) {
+               struct page *page;
+
+               ret = kvm_call_hyp_nvhe(__pkvm_reclaim_dying_guest_page,
+                                       handle, mapping->gfn);
+               if (WARN_ON(ret))
+                       continue;
+
+               page = pfn_to_page(mapping->pfn);
+               WARN_ON_ONCE(mapping->nr_pages != 1);
+               unpin_user_pages_dirty_lock(&page, 1, true);
+               account_locked_vm(current->mm, 1, false);
+               pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
+               kfree(mapping);
+       }
+
+       return 0;
+}
+
 static int __pkvm_pgtable_stage2_unshare(struct kvm_pgtable *pgt, u64 start, u64 end)
 {
        struct kvm *kvm = kvm_s2_mmu_to_kvm(pgt->mmu);
@@ -361,7 +387,10 @@ void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
                kvm->arch.pkvm.is_dying = true;
        }
 
-       __pkvm_pgtable_stage2_unshare(pgt, addr, addr + size);
+       if (kvm_vm_is_protected(kvm))
+               __pkvm_pgtable_stage2_reclaim(pgt, addr, addr + size);
+       else
+               __pkvm_pgtable_stage2_unshare(pgt, addr, addr + size);
 }
 
 void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt)