vm_userspace_mem_region_add(
/*vm=*/vm,
/*src_type=*/src_type,
- /*guest_paddr=*/start_gpa,
+ /*gpa=*/start_gpa,
/*slot=*/1,
/*npages=*/num_guest_pages,
/*flags=*/0);
gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
gva_t vm_alloc_page(struct kvm_vm *vm);
-void virt_map(struct kvm_vm *vm, gva_t gva, u64 paddr,
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
const char *exit_reason_str(unsigned int exit_reason);
-gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, u32 memslot);
-gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- gpa_t paddr_min, u32 memslot,
- bool protected);
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot);
+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa,
+ u32 memslot, bool protected);
gpa_t vm_alloc_page_table(struct kvm_vm *vm);
static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- gpa_t paddr_min, u32 memslot)
+ gpa_t min_gpa, u32 memslot)
{
/*
* By default, allocate memory as protected for VMs that support
* protected memory, as the majority of memory for such VMs is
* protected, i.e. using shared memory is effectively opt-in.
*/
- return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+ return __vm_phy_pages_alloc(vm, num, min_gpa, memslot,
vm_arch_has_protected_memory(vm));
}
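A minimal usage sketch (hypothetical test code, not part of this change; memslot 0 and the KVM_UTIL_MIN_PFN floor are illustrative assumptions). Because the wrapper defaults to protected pages on VMs with protected memory, a test that genuinely needs shared pages has to call __vm_phy_pages_alloc() directly:

	/* Default path: pages come back protected on e.g. SEV/TDX VMs. */
	gpa_t dflt_gpa = vm_phy_pages_alloc(vm, 4, KVM_UTIL_MIN_PFN * vm->page_size, 0);

	/* Explicit opt-in to shared memory by passing protected = false. */
	gpa_t shared_gpa = __vm_phy_pages_alloc(vm, 4, KVM_UTIL_MIN_PFN * vm->page_size,
						0, /*protected=*/false);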
/*
* Within @vm, creates a virtual translation for the page starting
- * at @gva to the page starting at @paddr.
+ * at @gva to the page starting at @gpa.
*/
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr);
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa);
-static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- virt_arch_pg_map(vm, gva, paddr);
+ virt_arch_pg_map(vm, gva, gpa);
sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift);
}
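A hedged illustration (hypothetical caller; the fixed GVA is an assumption and must fall within vm->vpages_valid): allocate one physical page and wire it up with virt_pg_map(), which also records the page in vm->vpages_mapped:

	/* Hypothetical: back a fixed guest virtual page with fresh physical memory. */
	gpa_t gpa = vm_phy_page_alloc(vm, KVM_UTIL_MIN_PFN * vm->page_size, 0);

	virt_pg_map(vm, /*gva=*/0x10000, gpa);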
void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
void kvm_arch_vm_release(struct kvm_vm *vm);
-bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr);
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa);
u32 guest_get_vcpuid(void);
struct pte_masks *pte_masks);
void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
- u64 paddr, int level);
-void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,
+ gpa_t gpa, int level);
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
u64 nr_bytes, int level);
void vm_enable_tdp(struct kvm_vm *vm);
bool kvm_cpu_has_tdp(void);
-void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, u64 paddr, u64 size);
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size);
void tdp_identity_map_default_memslots(struct kvm_vm *vm);
void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size);
u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa);
vm->mmu.pgd_created = true;
}
-static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr,
+static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
u64 flags)
{
u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
" gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
"Invalid virtual address, gva: 0x%lx", gva);
- TEST_ASSERT((paddr % vm->page_size) == 0,
- "Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
- "Physical address beyond beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
+ TEST_ASSERT((gpa % vm->page_size) == 0,
+ "Physical address not on page boundary,\n"
+ " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
+ "Physical address beyond beyond maximum supported,\n"
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8;
if (!*ptep)
if (!use_lpa2_pte_format(vm))
pg_attr |= PTE_SHARED;
- *ptep = addr_pte(vm, paddr, pg_attr);
+ *ptep = addr_pte(vm, gpa, pg_attr);
}
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
u64 attr_idx = MT_NORMAL;
- _virt_pg_map(vm, gva, paddr, attr_idx);
+ _virt_pg_map(vm, gva, gpa, attr_idx);
}
u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level)
TEST_FAIL("A mem region with the requested slot "
"already exists.\n"
- " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
- " existing slot: %u paddr: 0x%lx size: 0x%lx",
+ " requested slot: %u gpa: 0x%lx npages: 0x%lx\n"
+ " existing slot: %u gpa: 0x%lx size: 0x%lx",
slot, gpa, npages, region->region.slot,
(u64)region->region.guest_phys_addr,
(u64)region->region.memory_size);
u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
virt_pgd_alloc(vm);
- gpa_t paddr = __vm_phy_pages_alloc(vm, pages,
+ gpa_t gpa = __vm_phy_pages_alloc(vm, pages,
KVM_UTIL_MIN_PFN * vm->page_size,
vm->memslots[type], protected);
/* Map the virtual pages. */
for (gva_t gva = gva_start; pages > 0;
- pages--, gva += vm->page_size, paddr += vm->page_size) {
+ pages--, gva += vm->page_size, gpa += vm->page_size) {
- virt_pg_map(vm, gva, paddr);
+ virt_pg_map(vm, gva, gpa);
}
return gva_start;
* Map a range of VM virtual addresses to the VM's physical address space.
*
* Within the VM given by @vm, creates a virtual translation for @npages
- * starting at @gva to the page range starting at @paddr.
+ * starting at @gva to the page range starting at @gpa.
*/
-void virt_map(struct kvm_vm *vm, gva_t gva, u64 paddr,
- unsigned int npages)
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages)
{
size_t page_size = vm->page_size;
size_t size = npages * page_size;
TEST_ASSERT(gva + size > gva, "Vaddr overflow");
- TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+ TEST_ASSERT(gpa + size > gpa, "Paddr overflow");
while (npages--) {
- virt_pg_map(vm, gva, paddr);
+ virt_pg_map(vm, gva, gpa);
gva += page_size;
- paddr += page_size;
+ gpa += page_size;
}
}
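For example (hypothetical snippet; buf_gpa is assumed to have been allocated earlier and the numbers assume 4KiB pages), mapping a 16-page buffer is a single call, with the per-page work delegated to virt_pg_map():

	/* Hypothetical: create GVA 0x400000..0x410000 -> buf_gpa translations. */
	virt_map(vm, /*gva=*/0x400000, /*gpa=*/buf_gpa, /*npages=*/16);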
* Input Args:
* vm - Virtual Machine
* num - number of pages
- * paddr_min - Physical address minimum
+ * min_gpa - Physical address minimum
* memslot - Memory region to allocate page from
* protected - True if the pages will be used as protected/private memory
*
* Starting physical address
*
* Within the VM specified by vm, locates a range of available physical
- * pages at or above paddr_min. If found, the pages are marked as in use
+ * pages at or above min_gpa. If found, the pages are marked as in use
* and their base address is returned. A TEST_ASSERT failure occurs if
- * not enough pages are available at or above paddr_min.
+ * not enough pages are available at or above min_gpa.
*/
gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- gpa_t paddr_min, u32 memslot,
+ gpa_t min_gpa, u32 memslot,
bool protected)
{
struct userspace_mem_region *region;
TEST_ASSERT(num > 0, "Must allocate at least one page");
- TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
+ TEST_ASSERT((min_gpa % vm->page_size) == 0, "Min physical address "
"not divisible by page size.\n"
- " paddr_min: 0x%lx page_size: 0x%x",
- paddr_min, vm->page_size);
+ " min_gpa: 0x%lx page_size: 0x%x",
+ min_gpa, vm->page_size);
region = memslot2region(vm, memslot);
TEST_ASSERT(!protected || region->protected_phy_pages,
"Region doesn't support protected memory");
- base = pg = paddr_min >> vm->page_shift;
+ base = pg = min_gpa >> vm->page_shift;
do {
for (; pg < base + num; ++pg) {
if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
if (pg == 0) {
fprintf(stderr, "No guest physical page available, "
- "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
- paddr_min, vm->page_size, memslot);
+ "min_gpa: 0x%lx page_size: 0x%x memslot: %u\n",
+ min_gpa, vm->page_size, memslot);
fputs("---- vm dump ----\n", stderr);
vm_dump(stderr, vm, 2);
abort();
return base * vm->page_size;
}
-gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t paddr_min, u32 memslot)
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot)
{
- return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+ return vm_phy_pages_alloc(vm, 1, min_gpa, memslot);
}
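A short sketch of the alignment contract (hypothetical call sites): min_gpa must be a multiple of the page size or the TEST_ASSERT above fires, and page-table allocations typically come from the dedicated MEM_REGION_PT memslot, mirroring vm_alloc_page_table():

	/* OK: page-aligned minimum GPA, page-table memslot. */
	gpa_t pt_gpa = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
					 vm->memslots[MEM_REGION_PT]);

	/* Would assert: 0x1001 is not a multiple of vm->page_size. */
	/* gpa_t bad_gpa = vm_phy_page_alloc(vm, 0x1001, 0); */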
gpa_t vm_alloc_page_table(struct kvm_vm *vm)
kvm_selftest_arch_init();
}
-bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t paddr)
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa)
{
sparsebit_idx_t pg = 0;
struct userspace_mem_region *region;
if (!vm_arch_has_protected_memory(vm))
return false;
- region = userspace_mem_region_find(vm, paddr, paddr);
- TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
+ region = userspace_mem_region_find(vm, gpa, gpa);
+ TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa);
- pg = paddr >> vm->page_shift;
+ pg = gpa >> vm->page_shift;
return sparsebit_is_set(region->protected_phy_pages, pg);
}
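A hedged usage sketch (hypothetical helper): host-side code that pokes guest memory can use vm_is_gpa_protected() to skip pages it is not allowed to touch directly:

/* Hypothetical helper: only touch the page from the host if it is shared. */
static void zero_page_if_shared(struct kvm_vm *vm, gpa_t gpa)
{
	if (!vm_is_gpa_protected(vm, gpa))
		memset(addr_gpa2hva(vm, gpa), 0, vm->page_size);
}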
return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
u32 prot_bits;
u64 *ptep;
"gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
"Invalid virtual address, gva: 0x%lx", gva);
- TEST_ASSERT((paddr % vm->page_size) == 0,
+ TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
- "paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ "gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
- "paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
+ "gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
ptep = virt_populate_pte(vm, gva, 1);
prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
- WRITE_ONCE(*ptep, paddr | prot_bits);
+ WRITE_ONCE(*ptep, gpa | prot_bits);
}
static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
vm->mmu.pgd_created = true;
}
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
u64 *ptep, next_ppn;
int level = vm->mmu.pgtable_levels - 1;
" gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
"Invalid virtual address, gva: 0x%lx", gva);
- TEST_ASSERT((paddr % vm->page_size) == 0,
+ TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ " gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
if (!*ptep) {
level--;
}
- paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
- *ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
+ gpa = gpa >> PGTBL_PAGE_SIZE_SHIFT;
+ *ptep = (gpa << PGTBL_PTE_ADDR_SHIFT) |
PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
}
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
- gpa_t paddr;
+ gpa_t gpa;
TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
vm->page_size);
if (vm->mmu.pgd_created)
return;
- paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
+ gpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
KVM_GUEST_PAGE_TABLE_MIN_PADDR,
vm->memslots[MEM_REGION_PT]);
- memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
+ memset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size);
- vm->mmu.pgd = paddr;
+ vm->mmu.pgd = gpa;
vm->mmu.pgd_created = true;
}
"Invalid virtual address, gva: 0x%lx", gva);
TEST_ASSERT((gpa % vm->page_size) == 0,
"Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x",
+ " gpa: 0x%lx vm->page_size: 0x%x",
- gva, vm->page_size);
+ gpa, vm->page_size);
TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- gva, vm->max_gfn, vm->page_size);
+ gpa, vm->max_gfn, vm->page_size);
/* Walk through region and segment tables */
struct kvm_mmu *mmu,
u64 *parent_pte,
gva_t gva,
- u64 paddr,
+ gpa_t gpa,
int current_level,
int target_level)
{
u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level);
- paddr = vm_untag_gpa(vm, paddr);
+ gpa = vm_untag_gpa(vm, gpa);
if (!is_present_pte(mmu, pte)) {
*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
PTE_ALWAYS_SET_MASK(mmu);
if (current_level == target_level)
- *pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+ *pte |= PTE_HUGE_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);
else
*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
} else {
}
void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
- u64 paddr, int level)
+ gpa_t gpa, int level)
{
const u64 pg_size = PG_LEVEL_SIZE(level);
u64 *pte = &mmu->pgd;
"gva: 0x%lx page size: 0x%lx", gva, pg_size);
TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
"Invalid virtual address, gva: 0x%lx", gva);
- TEST_ASSERT((paddr % pg_size) == 0,
+ TEST_ASSERT((gpa % pg_size) == 0,
"Physical address not aligned,\n"
- " paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
- TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ " gpa: 0x%lx page size: 0x%lx", gpa, pg_size);
+ TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond maximum supported,\n"
- " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
- paddr, vm->max_gfn, vm->page_size);
- TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
- "Unexpected bits in paddr: %lx", paddr);
+ " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ gpa, vm->max_gfn, vm->page_size);
+ TEST_ASSERT(vm_untag_gpa(vm, gpa) == gpa,
+ "Unexpected bits in gpa: %lx", gpa);
TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),
"X and NX bit masks cannot be used simultaneously");
for (current_level = mmu->pgtable_levels;
current_level > PG_LEVEL_4K;
current_level--) {
- pte = virt_create_upper_pte(vm, mmu, pte, gva, paddr,
+ pte = virt_create_upper_pte(vm, mmu, pte, gva, gpa,
current_level, level);
if (is_huge_pte(mmu, pte))
return;
"PTE already present for 4k page at gva: 0x%lx", gva);
*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
- PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+ PTE_ALWAYS_SET_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);
/*
* Neither SEV nor TDX supports shared page tables, so only the final
* leaf PTE needs the C/S-bit set manually.
*/
- if (vm_is_gpa_protected(vm, paddr))
+ if (vm_is_gpa_protected(vm, gpa))
*pte |= PTE_C_BIT_MASK(mmu);
else
*pte |= PTE_S_BIT_MASK(mmu);
}
-void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, u64 paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
- __virt_pg_map(vm, &vm->mmu, gva, paddr, PG_LEVEL_4K);
+ __virt_pg_map(vm, &vm->mmu, gva, gpa, PG_LEVEL_4K);
}
-void virt_map_level(struct kvm_vm *vm, gva_t gva, u64 paddr,
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
u64 nr_bytes, int level)
{
u64 pg_size = PG_LEVEL_SIZE(level);
nr_bytes, pg_size);
for (i = 0; i < nr_pages; i++) {
- __virt_pg_map(vm, &vm->mmu, gva, paddr, level);
+ __virt_pg_map(vm, &vm->mmu, gva, gpa, level);
sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift,
nr_bytes / PAGE_SIZE);
gva += pg_size;
- paddr += pg_size;
+ gpa += pg_size;
}
}
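As an illustration (hypothetical, x86-only; PG_LEVEL_2M and the 2MiB-aligned addresses are assumptions), a test can back a region with huge pages instead of 4KiB PTEs:

	/* Hypothetical: map 4MiB at a 2MiB-aligned GVA/GPA pair using 2M mappings. */
	virt_map_level(vm, /*gva=*/0x40000000, /*gpa=*/0x40000000,
		       /*nr_bytes=*/4 * 1024 * 1024, PG_LEVEL_2M);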
return kvm_cpu_has_ept() || kvm_cpu_has_npt();
}
-void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, u64 paddr, u64 size, int level)
+void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size, int level)
{
size_t page_size = PG_LEVEL_SIZE(level);
size_t npages = size / page_size;
TEST_ASSERT(l2_gpa + size > l2_gpa, "L2 GPA overflow");
- TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+ TEST_ASSERT(gpa + size > gpa, "GPA overflow");
while (npages--) {
- __virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, paddr, level);
+ __virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, gpa, level);
l2_gpa += page_size;
- paddr += page_size;
+ gpa += page_size;
}
}
-void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, u64 paddr, u64 size)
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size)
{
- __tdp_map(vm, l2_gpa, paddr, size, PG_LEVEL_4K);
+ __tdp_map(vm, l2_gpa, gpa, size, PG_LEVEL_4K);
}
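A hedged sketch of a nested-TDP caller (hypothetical; assumes the TDP helpers introduced above and a pre-existing 4KiB page at @gpa): identity-map a single page of L1 GPA space into L2 via the stage-2 MMU:

	/* Hypothetical: 1:1 map one page for the L2 guest when TDP is available. */
	if (kvm_cpu_has_tdp()) {
		vm_enable_tdp(vm);
		tdp_map(vm, /*l2_gpa=*/gpa, /*gpa=*/gpa, /*size=*/vm->page_size);
	}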
/* Prepare an identity extended page table that maps all the