git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: selftests: Rename nested TDP mapping functions
author: Yosry Ahmed <yosry.ahmed@linux.dev>
Tue, 30 Dec 2025 23:01:32 +0000 (15:01 -0800)
committer: Sean Christopherson <seanjc@google.com>
Thu, 8 Jan 2026 20:02:06 +0000 (12:02 -0800)
Rename the functions from nested_* to tdp_* to make their purpose
clearer.

No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251230230150.4150236-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/include/x86/vmx.h
tools/testing/selftests/kvm/lib/x86/memstress.c
tools/testing/selftests/kvm/lib/x86/vmx.c
tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c

index 91916b8aa94b1bb76ef69c57864f5dfb430eb18c..04b8231d032a02689bca19f45d06cc20b01b9b38 100644 (file)
@@ -559,14 +559,14 @@ bool load_vmcs(struct vmx_pages *vmx);
 
 bool ept_1g_pages_supported(void);
 
-void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-                  uint64_t nested_paddr, uint64_t paddr);
-void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-                uint64_t nested_paddr, uint64_t paddr, uint64_t size);
-void nested_identity_map_default_memslots(struct vmx_pages *vmx,
-                                         struct kvm_vm *vm);
-void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
-                           uint64_t addr, uint64_t size);
+void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr,
+               uint64_t paddr);
+void tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr,
+            uint64_t paddr, uint64_t size);
+void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
+                                      struct kvm_vm *vm);
+void tdp_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
+                        uint64_t addr, uint64_t size);
 bool kvm_cpu_has_ept(void);
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm);
 void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
index 0b1f288ad5564c9ae947311356766cb65820d135..1928b00bde5174e741480717bd8863c2e77676a2 100644 (file)
@@ -70,11 +70,11 @@ void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
         * KVM can shadow the EPT12 with the maximum huge page size supported
         * by the backing source.
         */
-       nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
+       tdp_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
 
        start = align_down(memstress_args.gpa, PG_SIZE_1G);
        end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
-       nested_identity_map_1g(vmx, vm, start, end - start);
+       tdp_identity_map_1g(vmx, vm, start, end - start);
 }
 
 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
index eec33ec63811b462b8ca3fed1ee39599a475d086..1954ccdfc35389310943b3d3fac99ad5cda47533 100644 (file)
@@ -362,12 +362,12 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
        init_vmcs_guest_state(guest_rip, guest_rsp);
 }
 
-static void nested_create_pte(struct kvm_vm *vm,
-                             struct eptPageTableEntry *pte,
-                             uint64_t nested_paddr,
-                             uint64_t paddr,
-                             int current_level,
-                             int target_level)
+static void tdp_create_pte(struct kvm_vm *vm,
+                          struct eptPageTableEntry *pte,
+                          uint64_t nested_paddr,
+                          uint64_t paddr,
+                          int current_level,
+                          int target_level)
 {
        if (!pte->readable) {
                pte->writable = true;
@@ -394,8 +394,8 @@ static void nested_create_pte(struct kvm_vm *vm,
 }
 
 
-void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-                    uint64_t nested_paddr, uint64_t paddr, int target_level)
+void __tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                 uint64_t nested_paddr, uint64_t paddr, int target_level)
 {
        const uint64_t page_size = PG_LEVEL_SIZE(target_level);
        struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
@@ -428,7 +428,7 @@ void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
                pte = &pt[index];
 
-               nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);
+               tdp_create_pte(vm, pte, nested_paddr, paddr, level, target_level);
 
                if (pte->page_size)
                        break;
@@ -445,10 +445,10 @@ void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 
 }
 
-void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-                  uint64_t nested_paddr, uint64_t paddr)
+void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+               uint64_t nested_paddr, uint64_t paddr)
 {
-       __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
+       __tdp_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
 }
 
 /*
@@ -468,8 +468,8 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
  * Within the VM given by vm, creates a nested guest translation for the
  * page range starting at nested_paddr to the page range starting at paddr.
  */
-void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-                 uint64_t nested_paddr, uint64_t paddr, uint64_t size,
+void __tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+              uint64_t nested_paddr, uint64_t paddr, uint64_t size,
                  int level)
 {
        size_t page_size = PG_LEVEL_SIZE(level);
@@ -479,23 +479,23 @@ void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
        TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
 
        while (npages--) {
-               __nested_pg_map(vmx, vm, nested_paddr, paddr, level);
+               __tdp_pg_map(vmx, vm, nested_paddr, paddr, level);
                nested_paddr += page_size;
                paddr += page_size;
        }
 }
 
-void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-               uint64_t nested_paddr, uint64_t paddr, uint64_t size)
+void tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+            uint64_t nested_paddr, uint64_t paddr, uint64_t size)
 {
-       __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
+       __tdp_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
 }
 
 /* Prepare an identity extended page table that maps all the
  * physical pages in VM.
  */
-void nested_identity_map_default_memslots(struct vmx_pages *vmx,
-                                         struct kvm_vm *vm)
+void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
+                                      struct kvm_vm *vm)
 {
        uint32_t s, memslot = 0;
        sparsebit_idx_t i, last;
@@ -512,18 +512,16 @@ void nested_identity_map_default_memslots(struct vmx_pages *vmx,
                if (i > last)
                        break;
 
-               nested_map(vmx, vm,
-                          (uint64_t)i << vm->page_shift,
-                          (uint64_t)i << vm->page_shift,
-                          1 << vm->page_shift);
+               tdp_map(vmx, vm, (uint64_t)i << vm->page_shift,
+                       (uint64_t)i << vm->page_shift, 1 << vm->page_shift);
        }
 }
 
 /* Identity map a region with 1GiB Pages. */
-void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
+void tdp_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
                            uint64_t addr, uint64_t size)
 {
-       __nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
+       __tdp_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
 }
 
 bool kvm_cpu_has_ept(void)
index aab7333aaef051c11c028132161830534e519243..e7d0c08ba29d267696b9a160075db86f5a2c8c9b 100644 (file)
@@ -121,9 +121,9 @@ static void test_vmx_dirty_log(bool enable_ept)
         */
        if (enable_ept) {
                prepare_eptp(vmx, vm);
-               nested_identity_map_default_memslots(vmx, vm);
-               nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
-               nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
+               tdp_identity_map_default_memslots(vmx, vm);
+               tdp_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
+               tdp_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
        }
 
        bmap = bitmap_zalloc(TEST_MEM_PAGES);