Merge tag 'kvm-x86-selftests-6.20' of https://github.com/kvm-x86/linux into HEAD
author	Paolo Bonzini <pbonzini@redhat.com>
	Mon, 9 Feb 2026 17:38:54 +0000 (18:38 +0100)
committer	Paolo Bonzini <pbonzini@redhat.com>
	Mon, 9 Feb 2026 17:38:54 +0000 (18:38 +0100)
KVM selftests changes for 6.20

 - Add a regression test for TPR<=>CR8 synchronization and IRQ masking.

 - Overhaul the selftests' MMU infrastructure to genericize stage-2 MMU support,
   and extend x86's infrastructure to support EPT and NPT (for L2 guests); a
   rough sketch of the genericized state follows this list.

 - Extend several nested VMX tests to also cover nested SVM.

 - Add a selftest for nested VMLOAD/VMSAVE.

 - Rework the nested dirty log test, originally added as a regression test for
   PML where KVM logged L2 GPAs instead of L1 GPAs, to improve test coverage
   and to hopefully make the test easier to understand and maintain.
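
A rough sketch of the genericized MMU bookkeeping, as implied by the hunks
below: the per-VM fields pgd, pgd_created and pgtable_levels move into an
embedded per-MMU structure (vm->mmu.*), so that a second instance can later
describe a stage-2 (EPT/NPT) MMU for L2 guests. Only the mmu.pgd,
mmu.pgd_created and mmu.pgtable_levels names are taken from the diff; the
struct name and everything else here is an assumption, not the actual
selftests code.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical per-MMU state; field names follow the vm->mmu.* accesses
 * visible in the hunks below. */
struct kvm_mmu {
	uint64_t pgd;			/* guest-physical address of the root page table */
	bool pgd_created;		/* set once the root has been allocated */
	unsigned int pgtable_levels;	/* number of translation levels */
};

/* Trimmed stand-in for the selftests' struct kvm_vm, showing only how an
 * embedded MMU would hang off of it. */
struct kvm_vm {
	/* ... existing fields: page_size, page_shift, va_bits, memslots, ... */
	struct kvm_mmu mmu;		/* stage-1 MMU used by the existing helpers */
};

With a split along these lines, supporting nested EPT/NPT becomes largely a
matter of instantiating a second per-MMU structure rather than duplicating
the per-arch page-table walkers.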

tools/testing/selftests/kvm/Makefile.kvm
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/lib/arm64/processor.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/riscv/processor.c

diff --cc tools/testing/selftests/kvm/lib/arm64/processor.c
index 1605dc740d1e18911ec5bf82fa8473d7ea970fe9,c40f59d4831117ae1ae1a2afd2035f3d507646e0..43ea40edc53304b85490d17bb70a6272a72818fc
  
  static vm_vaddr_t exception_handlers;
  
 -static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
 -{
 -      return (v + vm->page_size) & ~(vm->page_size - 1);
 -}
 -
  static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
  {
-       unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
+       unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
  
        return (gva >> shift) & mask;
@@@ -110,15 -115,15 +110,15 @@@ static uint64_t __maybe_unused ptrs_per
  
  void virt_arch_pgd_alloc(struct kvm_vm *vm)
  {
 -      size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;
 +      size_t nr_pages = vm_page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;
  
-       if (vm->pgd_created)
+       if (vm->mmu.pgd_created)
                return;
  
-       vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
-                                    KVM_GUEST_PAGE_TABLE_MIN_PADDR,
-                                    vm->memslots[MEM_REGION_PT]);
-       vm->pgd_created = true;
+       vm->mmu.pgd = vm_phy_pages_alloc(vm, nr_pages,
+                                        KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+                                        vm->memslots[MEM_REGION_PT]);
+       vm->mmu.pgd_created = true;
  }
  
  static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
diff --cc tools/testing/selftests/kvm/lib/riscv/processor.c
index 401245fe31dbe02b8b79d1c8c0d0911457e0eb8d,e6ec7c224fc3e1505b66563a34815efeefa55ddc..7663bbabcf1a3628e8de7c19d68e1444c42b08e9
@@@ -63,15 -68,15 +63,15 @@@ static uint64_t pte_index(struct kvm_v
  
  void virt_arch_pgd_alloc(struct kvm_vm *vm)
  {
 -      size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;
 +      size_t nr_pages = vm_page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;
  
-       if (vm->pgd_created)
+       if (vm->mmu.pgd_created)
                return;
  
-       vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
-                                    KVM_GUEST_PAGE_TABLE_MIN_PADDR,
-                                    vm->memslots[MEM_REGION_PT]);
-       vm->pgd_created = true;
+       vm->mmu.pgd = vm_phy_pages_alloc(vm, nr_pages,
+                                        KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+                                        vm->memslots[MEM_REGION_PT]);
+       vm->mmu.pgd_created = true;
  }
  
  void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
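
Both files also switch from a local page_align() helper to a shared
vm_page_align(). Below is a minimal sketch of what such a helper could look
like, mirroring the arm64 page_align() body removed above; its exact
signature and location (presumably kvm_util.h) are assumptions.

#include <stdint.h>

/* Trimmed stand-in for the selftests' struct kvm_vm; only the field used by
 * the helper is shown. */
struct kvm_vm {
	uint64_t page_size;
};

/* Round v up to the VM's page size, mirroring the removed per-arch
 * page_align() bodies (note it adds a full page even when v is already
 * aligned, matching the original code). */
static inline uint64_t vm_page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size) & ~(vm->page_size - 1);
}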