]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: selftests: Add support for nested NPTs
authorYosry Ahmed <yosry.ahmed@linux.dev>
Tue, 30 Dec 2025 23:01:45 +0000 (15:01 -0800)
committerSean Christopherson <seanjc@google.com>
Thu, 8 Jan 2026 20:02:14 +0000 (12:02 -0800)
Implement nCR3 and NPT initialization functions, similar to the EPT
equivalents, and create common TDP helpers for enablement checking and
initialization. Enable NPT for nested guests by default if the TDP MMU
was initialized, similar to VMX.

Reuse the PTE masks from the main MMU in the NPT MMU, except for the C
and S bits related to confidential VMs.

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251230230150.4150236-17-seanjc@google.com
[sean: apply Yosry's fixup for ncr3_gpa]
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/include/x86/processor.h
tools/testing/selftests/kvm/include/x86/svm_util.h
tools/testing/selftests/kvm/lib/x86/memstress.c
tools/testing/selftests/kvm/lib/x86/processor.c
tools/testing/selftests/kvm/lib/x86/svm.c
tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c

index 461cf155b96e1cdf60921c3ed090665ac27388cd..115bec5eb1ebb2ccfcbccfeddf843c11ed55467b 100644 (file)
@@ -1479,6 +1479,8 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                    uint64_t nr_bytes, int level);
 
+void vm_enable_tdp(struct kvm_vm *vm);
+bool kvm_cpu_has_tdp(void);
 void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size);
 void tdp_identity_map_default_memslots(struct kvm_vm *vm);
 void tdp_identity_map_1g(struct kvm_vm *vm,  uint64_t addr, uint64_t size);
index b74c6dcddcbd64f5025fab8600f88e98d78043f2..5d7c42534bc45e65c4645b46443481772c8f0f58 100644 (file)
@@ -27,6 +27,9 @@ struct svm_test_data {
        void *msr; /* gva */
        void *msr_hva;
        uint64_t msr_gpa;
+
+       /* NPT */
+       uint64_t ncr3_gpa;
 };
 
 static inline void vmmcall(void)
@@ -57,6 +60,12 @@ struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
 
+static inline bool kvm_cpu_has_npt(void)
+{
+	return kvm_cpu_has(X86_FEATURE_NPT);	/* AMD Nested Page Tables support */
+}
+void vm_enable_npt(struct kvm_vm *vm);
+
 int open_sev_dev_path_or_exit(void);
 
 #endif /* SELFTEST_KVM_SVM_UTILS_H */
index 3319cb57a78d93c5c584e965c3449fcdd5e1daf5..407abfc34909da9fa2f135391b1eab5f25f9ee2f 100644 (file)
@@ -82,9 +82,9 @@ void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
        int vcpu_id;
 
        TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
-       TEST_REQUIRE(kvm_cpu_has_ept());
+       TEST_REQUIRE(kvm_cpu_has_tdp());
 
-       vm_enable_ept(vm);
+       vm_enable_tdp(vm);
        for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
                vcpu_alloc_vmx(vm, &vmx_gva);
 
index 29e7d172f94509c4788c6d4129bb114965a822b7..a3a4c9a4cbcbd34650744a428a91f11761351973 100644 (file)
@@ -8,7 +8,9 @@
 #include "kvm_util.h"
 #include "pmu.h"
 #include "processor.h"
+#include "svm_util.h"
 #include "sev.h"
+#include "vmx.h"
 
 #ifndef NUM_INTERRUPTS
 #define NUM_INTERRUPTS 256
@@ -472,6 +474,19 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
        }
 }
 
+void vm_enable_tdp(struct kvm_vm *vm)
+{
+	if (!kvm_cpu_has(X86_FEATURE_VMX))
+		vm_enable_npt(vm);	/* AMD: Nested Page Tables */
+	else
+		vm_enable_ept(vm);	/* Intel: Extended Page Tables */
+}
+
+bool kvm_cpu_has_tdp(void)
+{
+	return kvm_cpu_has_ept() || kvm_cpu_has_npt();	/* EPT (Intel) or NPT (AMD) */
+}
+
 void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
               uint64_t size, int level)
 {
index d239c2097391868028fead8a453bd77f8224c95b..a25a3471f5f6749621e0370400dac8ef4d684469 100644 (file)
@@ -46,6 +46,9 @@ vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
        svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr);
        memset(svm->msr_hva, 0, getpagesize());
 
+       if (vm->stage2_mmu.pgd_created)
+               svm->ncr3_gpa = vm->stage2_mmu.pgd;
+
        *p_svm_gva = svm_gva;
        return svm;
 }
@@ -59,6 +62,22 @@ static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
        seg->base = base;
 }
 
+void vm_enable_npt(struct kvm_vm *vm)
+{
+	struct pte_masks pte_masks;
+
+	TEST_ASSERT(kvm_cpu_has_npt(), "KVM doesn't support nested NPT");
+
+	/*
+	 * NPTs use the same PTE format, but deliberately drop the C-bit as the
+	 * per-VM shared vs. private information is only meant for stage-1.
+	 */
+	pte_masks = vm->mmu.arch.pte_masks;
+	pte_masks.c = 0;
+
+	tdp_mmu_init(vm, vm->mmu.pgtable_levels, &pte_masks);
+}
+
 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
 {
        struct vmcb *vmcb = svm->vmcb;
@@ -102,6 +121,11 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
        vmcb->save.rip = (u64)guest_rip;
        vmcb->save.rsp = (u64)guest_rsp;
        guest_regs.rdi = (u64)svm;
+
+       if (svm->ncr3_gpa) {
+               ctrl->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
+               ctrl->nested_cr3 = svm->ncr3_gpa;
+       }
 }
 
 /*
index 370f8d3117c29d2050cbd8900ecda3d2d8ad92ec..032ab8bf60a44ffe9a50d8f33f2e728c1efcc56d 100644 (file)
@@ -93,7 +93,7 @@ static void test_vmx_dirty_log(bool enable_ept)
        /* Create VM */
        vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
        if (enable_ept)
-               vm_enable_ept(vm);
+               vm_enable_tdp(vm);
 
        vcpu_alloc_vmx(vm, &vmx_pages_gva);
        vcpu_args_set(vcpu, 1, vmx_pages_gva);
@@ -170,7 +170,7 @@ int main(int argc, char *argv[])
 
        test_vmx_dirty_log(/*enable_ept=*/false);
 
-       if (kvm_cpu_has_ept())
+       if (kvm_cpu_has_tdp())
                test_vmx_dirty_log(/*enable_ept=*/true);
 
        return 0;