KVM: selftests: Plumb "struct kvm_mmu" into x86's MMU APIs
author	Yosry Ahmed <yosry.ahmed@linux.dev>
	Tue, 30 Dec 2025 23:01:36 +0000 (15:01 -0800)
committer	Sean Christopherson <seanjc@google.com>
	Thu, 8 Jan 2026 20:02:08 +0000 (12:02 -0800)
In preparation for generalizing the x86 virt mapping APIs to work with
TDP (stage-2) page tables, plumb "struct kvm_mmu" into all of the helper
functions instead of operating on vm->mmu directly.
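
As a sketch of the intended direction (hypothetical, not part of this
patch: the "tdp_mmu" field and tdp_pg_map() helper below don't exist),
passing the MMU explicitly lets a future TDP/EPT user drive the same
walk and creation logic with a second MMU instance:

	/* Hypothetical sketch: vm->arch.tdp_mmu is an assumed field. */
	static void tdp_pg_map(struct kvm_vm *vm, uint64_t gpa, uint64_t hpa)
	{
		/* Reuse the generalized walker with a stage-2 MMU. */
		__virt_pg_map(vm, &vm->arch.tdp_mmu, gpa, hpa, PG_LEVEL_4K);
	}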

Opportunistically swap the order of the checks in virt_get_pte() to
first check whether the parent is the PGD, and only then assert that
the parent PTE is PRESENT, as it makes more sense to rule out the
PGD/root (which isn't a PTE) before testing the PRESENT bit.
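
For reference, the PGD needs the exemption because the walk starts with
parent_pte pointing at mmu->pgd itself, which holds a bare, page-aligned
GPA straight from vm_alloc_page_table(), i.e. with no PTE flag bits set
(sketch of the walk's first iteration, not new code):

	uint64_t *pte = &mmu->pgd;	/* first "parent_pte" of the walk */

	/*
	 * *pte is a raw GPA, so PTE_PRESENT_MASK (bit 0) is clear; exempt
	 * the root first, then require PRESENT for everything else.
	 */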

No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
[sean: rebase on common kvm_mmu structure, rewrite changelog]
Link: https://patch.msgid.link/20251230230150.4150236-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/include/x86/processor.h
tools/testing/selftests/kvm/lib/x86/processor.c

index 1cb5b4c46b99ab5aaad94fb04d814fbecab8fdf8..43970a96baedb633ac7f6f79b23164ff9cdf3f05 100644 (file)
@@ -1451,7 +1451,8 @@ enum pg_level {
 #define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
 #define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)
 
-void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
+                  uint64_t paddr, int level);
 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                    uint64_t nr_bytes, int level);
 
index f027f86d1535401df3caa6168892742a3e4f63fc..f25742a804b02266116b87373ddc8f897960f8de 100644 (file)
@@ -156,26 +156,31 @@ bool kvm_is_tdp_enabled(void)
                return get_kvm_amd_param_bool("npt");
 }
 
+static void virt_mmu_init(struct kvm_vm *vm, struct kvm_mmu *mmu)
+{
+       /* If needed, create the top-level page table. */
+       if (!mmu->pgd_created) {
+               mmu->pgd = vm_alloc_page_table(vm);
+               mmu->pgd_created = true;
+       }
+}
+
 void virt_arch_pgd_alloc(struct kvm_vm *vm)
 {
        TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
                    "Unknown or unsupported guest mode: 0x%x", vm->mode);
 
-       /* If needed, create the top-level page table. */
-       if (!vm->mmu.pgd_created) {
-               vm->mmu.pgd = vm_alloc_page_table(vm);
-               vm->mmu.pgd_created = true;
-       }
+       virt_mmu_init(vm, &vm->mmu);
 }
 
-static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte,
-                         uint64_t vaddr, int level)
+static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
+                         uint64_t *parent_pte, uint64_t vaddr, int level)
 {
        uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
        uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
        int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
 
-       TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->mmu.pgd,
+       TEST_ASSERT(parent_pte == &mmu->pgd || (*parent_pte & PTE_PRESENT_MASK),
                    "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
                    level + 1, vaddr);
 
@@ -183,13 +188,14 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte,
 }
 
 static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
+                                      struct kvm_mmu *mmu,
                                       uint64_t *parent_pte,
                                       uint64_t vaddr,
                                       uint64_t paddr,
                                       int current_level,
                                       int target_level)
 {
-       uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level);
+       uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);
 
        paddr = vm_untag_gpa(vm, paddr);
 
@@ -215,10 +221,11 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
        return pte;
 }
 
-void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
+                  uint64_t paddr, int level)
 {
        const uint64_t pg_size = PG_LEVEL_SIZE(level);
-       uint64_t *pte = &vm->mmu.pgd;
+       uint64_t *pte = &mmu->pgd;
        int current_level;
 
        TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
@@ -243,17 +250,17 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
         * Allocate upper level page tables, if not already present.  Return
         * early if a hugepage was created.
         */
-       for (current_level = vm->mmu.pgtable_levels;
+       for (current_level = mmu->pgtable_levels;
             current_level > PG_LEVEL_4K;
             current_level--) {
-               pte = virt_create_upper_pte(vm, pte, vaddr, paddr,
+               pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
                                            current_level, level);
                if (*pte & PTE_LARGE_MASK)
                        return;
        }
 
        /* Fill in page table entry. */
-       pte = virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
+       pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
        TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
                    "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
        *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
@@ -270,7 +277,7 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
 
 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
-       __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
+       __virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
 }
 
 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
@@ -285,7 +292,7 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                    nr_bytes, pg_size);
 
        for (i = 0; i < nr_pages; i++) {
-               __virt_pg_map(vm, vaddr, paddr, level);
+               __virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
                sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
                                  nr_bytes / PAGE_SIZE);
 
@@ -294,7 +301,8 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
        }
 }
 
-static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
+static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
+                            int *level, int current_level)
 {
        if (*pte & PTE_LARGE_MASK) {
                TEST_ASSERT(*level == PG_LEVEL_NONE ||
@@ -306,17 +314,19 @@ static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
        return *level == current_level;
 }
 
-static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
+static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
+                                          struct kvm_mmu *mmu,
+                                          uint64_t vaddr,
                                           int *level)
 {
-       int va_width = 12 + (vm->mmu.pgtable_levels) * 9;
-       uint64_t *pte = &vm->mmu.pgd;
+       int va_width = 12 + (mmu->pgtable_levels) * 9;
+       uint64_t *pte = &mmu->pgd;
        int current_level;
 
        TEST_ASSERT(!vm->arch.is_pt_protected,
                    "Walking page tables of protected guests is impossible");
 
-       TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= vm->mmu.pgtable_levels,
+       TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= mmu->pgtable_levels,
                    "Invalid PG_LEVEL_* '%d'", *level);
 
        TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
@@ -332,22 +342,22 @@ static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
                    (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
                    "Canonical check failed.  The virtual address is invalid.");
 
-       for (current_level = vm->mmu.pgtable_levels;
+       for (current_level = mmu->pgtable_levels;
             current_level > PG_LEVEL_4K;
             current_level--) {
-               pte = virt_get_pte(vm, pte, vaddr, current_level);
-               if (vm_is_target_pte(pte, level, current_level))
+               pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
+               if (vm_is_target_pte(mmu, pte, level, current_level))
                        return pte;
        }
 
-       return virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
+       return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
 }
 
 uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
 {
        int level = PG_LEVEL_4K;
 
-       return __vm_get_page_table_entry(vm, vaddr, &level);
+       return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
 }
 
 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
@@ -497,7 +507,7 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
        int level = PG_LEVEL_NONE;
-       uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);
+       uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
 
        TEST_ASSERT(*pte & PTE_PRESENT_MASK,
                    "Leaf PTE not PRESENT for gva: 0x%08lx", gva);