Now that KVM selftests use gva_t instead of vm_vaddr_t, rename the API
for finding an unused range of virtual memory to drop the defunct
terminology and use "vm" for the scope.
Opportunistically clean up the function comment to drop superfluous
and redundant information.
No functional change intended.
Link: https://patch.msgid.link/20260420212004.3938325-13-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
void vm_mem_region_delete(struct kvm_vm *vm, u32 slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
-gva_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min);
gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t vaddr_min,
enum kvm_mem_region_type type);
void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
- gva_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+ gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
virt_map(vm, mmio_gva, mmio_gpa, 1);
}
/*
- * VM Virtual Address Unused Gap
- *
- * Input Args:
- * vm - Virtual Machine
- * sz - Size (bytes)
- * vaddr_min - Minimum Virtual Address
- *
- * Output Args: None
- *
- * Return:
- * Lowest virtual address at or above vaddr_min, with at least
- * sz unused bytes. TEST_ASSERT failure if no area of at least
- * size sz is available.
- *
- * Within the VM specified by vm, locates the lowest starting virtual
- * address >= vaddr_min, that has at least sz unallocated bytes. A
+ * Within the VM specified by @vm, locates the lowest starting guest virtual
+ * address >= @vaddr_min, that has at least @sz unallocated bytes. A
* TEST_ASSERT failure occurs for invalid input or no area of at least
- * sz unallocated bytes >= vaddr_min is available.
+ * @sz unallocated bytes >= @vaddr_min is available.
*/
-gva_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min)
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t vaddr_min)
{
u64 pages = (sz + vm->page_size - 1) >> vm->page_shift;
* Find an unused range of virtual page addresses of at least
* pages in length.
*/
- gva_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
+ gva_t vaddr_start = vm_unused_gva_gap(vm, sz, vaddr_min);
/* Map the virtual pages. */
for (gva_t vaddr = vaddr_start; pages > 0;
void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
- gva_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+ gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
virt_map(vm, mmio_gva, mmio_gpa, 1);
* Get PTE pointers for test pages and map them inside the guest.
* Use separate page for each PTE for simplicity.
*/
- gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
+ gva = vm_unused_gva_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
for (i = 0; i < NTEST_PAGES; i++) {
pte = vm_get_pte(vm, data->test_pages + i * PAGE_SIZE);
gpa = addr_hva2gpa(vm, pte);