From e40e72fec0dea9ac55aea84a0d76ccb7d7f32204 Mon Sep 17 00:00:00 2001
From: Yosry Ahmed
Date: Tue, 30 Dec 2025 15:01:40 -0800
Subject: [PATCH] KVM: selftests: Stop passing VMX metadata to TDP mapping
 functions

The root GPA is now retrieved from the nested MMU, so stop passing VMX
metadata. This is in preparation for making these functions work for
NPTs as well.

Opportunistically drop tdp_pg_map() since it's unused.

No functional change intended.

Signed-off-by: Yosry Ahmed
Link: https://patch.msgid.link/20251230230150.4150236-12-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/include/x86/vmx.h | 11 ++-----
 .../testing/selftests/kvm/lib/x86/memstress.c | 11 +++----
 tools/testing/selftests/kvm/lib/x86/vmx.c     | 33 +++++++------------
 .../selftests/kvm/x86/vmx_dirty_log_test.c    |  9 +++--
 4 files changed, 24 insertions(+), 40 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86/vmx.h b/tools/testing/selftests/kvm/include/x86/vmx.h
index 1fd83c23529ac..4dd4c2094ee64 100644
--- a/tools/testing/selftests/kvm/include/x86/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86/vmx.h
@@ -557,14 +557,9 @@ bool load_vmcs(struct vmx_pages *vmx);
 
 bool ept_1g_pages_supported(void);
 
-void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr,
-		uint64_t paddr);
-void tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr,
-	     uint64_t paddr, uint64_t size);
-void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
-				       struct kvm_vm *vm);
-void tdp_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
-			 uint64_t addr, uint64_t size);
+void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size);
+void tdp_identity_map_default_memslots(struct kvm_vm *vm);
+void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size);
 bool kvm_cpu_has_ept(void);
 void vm_enable_ept(struct kvm_vm *vm);
 void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/x86/memstress.c b/tools/testing/selftests/kvm/lib/x86/memstress.c
index 00f7f11e5f0eb..3319cb57a78d9 100644
--- a/tools/testing/selftests/kvm/lib/x86/memstress.c
+++ b/tools/testing/selftests/kvm/lib/x86/memstress.c
@@ -59,7 +59,7 @@ uint64_t memstress_nested_pages(int nr_vcpus)
 	return 513 + 10 * nr_vcpus;
 }
 
-static void memstress_setup_ept_mappings(struct vmx_pages *vmx, struct kvm_vm *vm)
+static void memstress_setup_ept_mappings(struct kvm_vm *vm)
 {
 	uint64_t start, end;
 
@@ -68,16 +68,15 @@ static void memstress_setup_ept_mappings(struct vmx_pages *vmx, struct kvm_vm *v
 	 * KVM can shadow the EPT12 with the maximum huge page size supported
 	 * by the backing source.
 	 */
-	tdp_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
+	tdp_identity_map_1g(vm, 0, 0x100000000ULL);
 
 	start = align_down(memstress_args.gpa, PG_SIZE_1G);
 	end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
-	tdp_identity_map_1g(vmx, vm, start, end - start);
+	tdp_identity_map_1g(vm, start, end - start);
 }
 
 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
 {
-	struct vmx_pages *vmx;
 	struct kvm_regs regs;
 	vm_vaddr_t vmx_gva;
 	int vcpu_id;
@@ -87,11 +86,11 @@ void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
 	vm_enable_ept(vm);
 
 	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-		vmx = vcpu_alloc_vmx(vm, &vmx_gva);
+		vcpu_alloc_vmx(vm, &vmx_gva);
 
 		/* The EPTs are shared across vCPUs, setup the mappings once */
 		if (vcpu_id == 0)
-			memstress_setup_ept_mappings(vmx, vm);
+			memstress_setup_ept_mappings(vm);
 
 		/*
 		 * Override the vCPU to run memstress_l1_guest_code() which will
diff --git a/tools/testing/selftests/kvm/lib/x86/vmx.c b/tools/testing/selftests/kvm/lib/x86/vmx.c
index 9d4e391fdf2c1..ea1c09f9e8ab8 100644
--- a/tools/testing/selftests/kvm/lib/x86/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86/vmx.c
@@ -409,8 +409,8 @@ static void tdp_create_pte(struct kvm_vm *vm,
 }
 
-void __tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		  uint64_t nested_paddr, uint64_t paddr, int target_level)
+void __tdp_pg_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
+		  int target_level)
 {
 	const uint64_t page_size = PG_LEVEL_SIZE(target_level);
 	void *eptp_hva = addr_gpa2hva(vm, vm->arch.tdp_mmu->pgd);
 
@@ -453,12 +453,6 @@ void __tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 	}
 }
 
-void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		uint64_t nested_paddr, uint64_t paddr)
-{
-	__tdp_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
-}
-
 /*
  * Map a range of EPT guest physical addresses to the VM's physical address
  *
@@ -476,9 +470,8 @@ void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
  * Within the VM given by vm, creates a nested guest translation for the
  * page range starting at nested_paddr to the page range starting at paddr.
  */
-void __tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-	       uint64_t nested_paddr, uint64_t paddr, uint64_t size,
-	       int level)
+void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
+	       uint64_t size, int level)
 {
 	size_t page_size = PG_LEVEL_SIZE(level);
 	size_t npages = size / page_size;
@@ -487,23 +480,22 @@ void __tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
 
 	while (npages--) {
-		__tdp_pg_map(vmx, vm, nested_paddr, paddr, level);
+		__tdp_pg_map(vm, nested_paddr, paddr, level);
 		nested_paddr += page_size;
 		paddr += page_size;
 	}
 }
 
-void tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-	     uint64_t nested_paddr, uint64_t paddr, uint64_t size)
+void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
+	     uint64_t size)
 {
-	__tdp_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
+	__tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
 }
 
 /* Prepare an identity extended page table that maps all the
  * physical pages in VM.
  */
-void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
-				       struct kvm_vm *vm)
+void tdp_identity_map_default_memslots(struct kvm_vm *vm)
 {
 	uint32_t s, memslot = 0;
 	sparsebit_idx_t i, last;
@@ -520,16 +512,15 @@ void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
 		if (i > last)
 			break;
 
-		tdp_map(vmx, vm, (uint64_t)i << vm->page_shift,
+		tdp_map(vm, (uint64_t)i << vm->page_shift,
 			(uint64_t)i << vm->page_shift, 1 << vm->page_shift);
 	}
 }
 
 /* Identity map a region with 1GiB Pages. */
-void tdp_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
-			 uint64_t addr, uint64_t size)
+void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size)
 {
-	__tdp_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
+	__tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
 }
 
 bool kvm_cpu_has_ept(void)
diff --git a/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c
index 5c8cf8ac42a27..370f8d3117c29 100644
--- a/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c
@@ -80,7 +80,6 @@ void l1_guest_code(struct vmx_pages *vmx)
 static void test_vmx_dirty_log(bool enable_ept)
 {
 	vm_vaddr_t vmx_pages_gva = 0;
-	struct vmx_pages *vmx;
 	unsigned long *bmap;
 	uint64_t *host_test_mem;
 
@@ -96,7 +95,7 @@ static void test_vmx_dirty_log(bool enable_ept)
 	if (enable_ept)
 		vm_enable_ept(vm);
 
-	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+	vcpu_alloc_vmx(vm, &vmx_pages_gva);
 	vcpu_args_set(vcpu, 1, vmx_pages_gva);
 
 	/* Add an extra memory slot for testing dirty logging */
@@ -120,9 +119,9 @@ static void test_vmx_dirty_log(bool enable_ept)
 	 * GPAs as the EPT enabled case.
 	 */
 	if (enable_ept) {
-		tdp_identity_map_default_memslots(vmx, vm);
-		tdp_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
-		tdp_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
+		tdp_identity_map_default_memslots(vm);
+		tdp_map(vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
+		tdp_map(vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
 	}
 
 	bmap = bitmap_zalloc(TEST_MEM_PAGES);
-- 
2.47.3
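For illustration only, not part of the patch: a minimal sketch of what a caller
looks like after this change, assuming vm_enable_ept() is what populates
vm->arch.tdp_mmu (the call order in memstress_setup_nested() and
test_vmx_dirty_log() above suggests as much). example_setup_nested_ept() is a
hypothetical helper; every function it calls appears in the diff with its
post-patch signature.

/* Hypothetical post-patch caller; mirrors the memstress setup flow. */
static void example_setup_nested_ept(struct kvm_vm *vm)
{
	vm_vaddr_t vmx_gva;

	/* Create the nested (TDP) MMU before establishing any mappings. */
	vm_enable_ept(vm);

	/* VMX pages are still allocated, but no longer passed to tdp_*(). */
	vcpu_alloc_vmx(vm, &vmx_gva);

	/* Identity map the first 4GiB with 1GiB pages... */
	tdp_identity_map_1g(vm, 0, 0x100000000ULL);

	/* ...and cover the default memslots with 4KiB mappings. */
	tdp_identity_map_default_memslots(vm);
}

Taking only the struct kvm_vm is what makes these helpers VMX-agnostic: once
the root comes from vm->arch.tdp_mmu->pgd instead of struct vmx_pages, the same
code can be reused to build NPTs, as the commit message notes.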