KVM: selftests: Set the user bit on nested NPT PTEs
author Yosry Ahmed <yosry.ahmed@linux.dev>
Tue, 30 Dec 2025 23:01:46 +0000 (15:01 -0800)
committer Sean Christopherson <seanjc@google.com>
Thu, 8 Jan 2026 20:02:15 +0000 (12:02 -0800)
According to the APM, NPT walks are treated as user accesses. In
preparation for supporting NPT mappings, set the 'user' bit on NPT PTEs
by adding a mask of bits to always be set on PTEs in kvm_mmu.

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251230230150.4150236-18-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
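
As a rough sketch of the idea (not part of the patch; the bit values and
the make_pte() helper are illustrative only), folding an always-set mask
into PTE construction looks like this:

#include <stdint.h>

#define PTE_PRESENT_BIT   (1ULL << 0)
#define PTE_WRITABLE_BIT  (1ULL << 1)
#define PTE_USER_BIT      (1ULL << 2)
#define PHYS_PAGE_MASK    (~0xfffULL)

/* Build a PTE: base permissions plus whatever bits must always be set. */
static uint64_t make_pte(uint64_t paddr, uint64_t always_set)
{
	return PTE_PRESENT_BIT | PTE_WRITABLE_BIT | always_set |
	       (paddr & PHYS_PAGE_MASK);
}

int main(void)
{
	/* Per the APM, NPT walks are user accesses: always set 'user'. */
	uint64_t pte = make_pte(0x42000, PTE_USER_BIT);

	return !(pte & PTE_USER_BIT);	/* exits 0: the user bit is set */
}
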
tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
tools/testing/selftests/kvm/include/x86/processor.h
tools/testing/selftests/kvm/lib/x86/processor.c
tools/testing/selftests/kvm/lib/x86/svm.c

tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
index 1cf84b8212c62417e45300d8fa7e21261ac2bc0c..be35d26bb32005e21378edcc924aee6ed3fa464e 100644
@@ -22,6 +22,8 @@ struct pte_masks {
        uint64_t nx;
        uint64_t c;
        uint64_t s;
+
+       uint64_t always_set;
 };
 
 struct kvm_mmu_arch {
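
For context, a sketch of the whole structure after this change (fields
other than those visible in the hunk are inferred from the PTE_*_MASK
macros used elsewhere in this diff and may not match the tree exactly):

struct pte_masks {
	uint64_t present;
	uint64_t writable;
	uint64_t readable;
	uint64_t executable;
	uint64_t user;
	uint64_t huge;
	uint64_t nx;
	uint64_t c;
	uint64_t s;

	/* Bits unconditionally ORed into every PTE the library creates. */
	uint64_t always_set;
};
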
tools/testing/selftests/kvm/include/x86/processor.h
index 115bec5eb1ebb2ccfcbccfeddf843c11ed55467b..995277cae94edd4633e2a9b2fd2297f9985f950f 100644
@@ -1452,6 +1452,7 @@ enum pg_level {
 #define PTE_NX_MASK(mmu)               ((mmu)->arch.pte_masks.nx)
 #define PTE_C_BIT_MASK(mmu)            ((mmu)->arch.pte_masks.c)
 #define PTE_S_BIT_MASK(mmu)            ((mmu)->arch.pte_masks.s)
+#define PTE_ALWAYS_SET_MASK(mmu)       ((mmu)->arch.pte_masks.always_set)
 
 /*
  * For PTEs without a PRESENT bit (i.e. EPT entries), treat the PTE as present
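
A hypothetical sanity check built on the new accessor (TEST_ASSERT is the
selftest harness's assertion macro; where such a check would live is an
assumption, not part of the patch):

	/* Any PTE the library built must carry all of the always-set bits. */
	TEST_ASSERT((*pte & PTE_ALWAYS_SET_MASK(mmu)) == PTE_ALWAYS_SET_MASK(mmu),
		    "PTE 0x%lx is missing always-set bits 0x%lx",
		    *pte, PTE_ALWAYS_SET_MASK(mmu));
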
tools/testing/selftests/kvm/lib/x86/processor.c
index a3a4c9a4cbcbd34650744a428a91f11761351973..5a3385d489024d66d5d6ecc20e942677064051f8 100644
@@ -231,7 +231,8 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
 
        if (!is_present_pte(mmu, pte)) {
                *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
-                      PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu);
+                      PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
+                      PTE_ALWAYS_SET_MASK(mmu);
                if (current_level == target_level)
                        *pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
                else
@@ -299,7 +300,7 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
                    "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
        *pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
               PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
-              (paddr & PHYSICAL_PAGE_MASK);
+              PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
 
        /*
         * Neither SEV nor TDX supports shared page tables, so only the final
tools/testing/selftests/kvm/lib/x86/svm.c
index a25a3471f5f6749621e0370400dac8ef4d684469..2e5c480c9afd481077c0f12db42e577f9fc54736 100644
@@ -75,6 +75,9 @@ void vm_enable_npt(struct kvm_vm *vm)
        pte_masks = vm->mmu.arch.pte_masks;
        pte_masks.c = 0;
 
+       /* NPT walks are treated as user accesses, so set the 'user' bit. */
+       pte_masks.always_set = pte_masks.user;
+
        tdp_mmu_init(vm, vm->mmu.pgtable_levels, &pte_masks);
 }
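
A hypothetical end-to-end caller, to show the flow (map_l2_gpa() is made
up for illustration, and the __virt_pg_map() arguments past vaddr are
assumed from context; vm_enable_npt() and __virt_pg_map() are the
functions touched above):

static void map_l2_gpa(struct kvm_vm *vm, uint64_t gpa, uint64_t hpa)
{
	/* Switch the MMU to NPT masks, with 'user' in always_set. */
	vm_enable_npt(vm);

	/*
	 * Every NPT entry created below carries the 'user' bit, so the
	 * hardware walk is legal as the user access the APM says it is.
	 */
	__virt_pg_map(vm, &vm->mmu, gpa, hpa, PG_LEVEL_4K);
}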