#define PTE_NX_MASK(mmu) ((mmu)->arch.pte_masks.nx)
#define PTE_C_BIT_MASK(mmu) ((mmu)->arch.pte_masks.c)
#define PTE_S_BIT_MASK(mmu) ((mmu)->arch.pte_masks.s)
+#define PTE_ALWAYS_SET_MASK(mmu) ((mmu)->arch.pte_masks.always_set)
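
For context, these macros presumably read from a per-MMU set of PTE masks
shaped roughly like the sketch below. The field names mirror the macros and
the code further down, but the struct itself isn't part of this excerpt, so
treat the exact layout as an assumption:

	/* Assumed layout; only 'always_set' is new in this patch. */
	struct pte_masks {
		uint64_t present;
		uint64_t readable;
		uint64_t writable;
		uint64_t executable;
		uint64_t user;
		uint64_t huge;
		uint64_t nx;
		uint64_t c;		/* SEV C-bit */
		uint64_t s;		/* TDX shared bit */
		uint64_t always_set;	/* ORed into every PTE that gets built */
	};
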
/*
 * For PTEs without a PRESENT bit (i.e. EPT entries), treat the PTE as present
 * if any bits are set.
 */
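
A minimal sketch of the presence check this comment describes, assuming the
EPT case is signalled by an all-zero PRESENT mask and a 'struct tdp_mmu'
type (neither the helper's body nor its signature is shown in this excerpt):

	static bool is_present_pte(struct tdp_mmu *mmu, uint64_t *pte)
	{
		/* EPT entries have no PRESENT bit; any set bit => present. */
		if (!PTE_PRESENT_MASK(mmu))
			return !!*pte;

		return !!(*pte & PTE_PRESENT_MASK(mmu));
	}
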
if (!is_present_pte(mmu, pte)) {
	*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
-	       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu);
+	       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
+	       PTE_ALWAYS_SET_MASK(mmu);
	if (current_level == target_level)
		*pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
	else
		*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
}
"PTE already present for 4k page at vaddr: 0x%lx", vaddr);
*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
- (paddr & PHYSICAL_PAGE_MASK);
+ PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
/*
 * Neither SEV nor TDX supports shared page tables, so only the final
 * mapping can be shared.
 */
	pte_masks = vm->mmu.arch.pte_masks;
	pte_masks.c = 0;
+	/* NPT walks are treated as user accesses, so set the 'user' bit. */
+	pte_masks.always_set = pte_masks.user;
+
	tdp_mmu_init(vm, vm->mmu.pgtable_levels, &pte_masks);
}
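
To make the always_set plumbing concrete, a hypothetical x86 example (the bit
numbers are architectural, not taken from this patch): legacy/NPT-format PTEs
have PRESENT at bit 0, WRITABLE at bit 1, and USER at bit 2, with no separate
readable/executable bits (so those masks would be zero). With .always_set =
.user, the 4k leaf construction above yields, for paddr 0x1000,
0x1 | 0x2 | 0x4 | 0x1000 = 0x1007, i.e. a user-accessible entry, which is what
NPT walks (treated as user accesses by hardware) require.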