bool ept_1g_pages_supported(void);
-void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr,
- uint64_t paddr);
-void tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr,
- uint64_t paddr, uint64_t size);
-void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
- struct kvm_vm *vm);
-void tdp_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t addr, uint64_t size);
+void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size);
+void tdp_identity_map_default_memslots(struct kvm_vm *vm);
+void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size);
bool kvm_cpu_has_ept(void);
void vm_enable_ept(struct kvm_vm *vm);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
return 513 + 10 * nr_vcpus;
}
-static void memstress_setup_ept_mappings(struct vmx_pages *vmx, struct kvm_vm *vm)
+static void memstress_setup_ept_mappings(struct kvm_vm *vm)
{
uint64_t start, end;
* KVM can shadow the EPT12 with the maximum huge page size supported
* by the backing source.
*/
- tdp_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
+ tdp_identity_map_1g(vm, 0, 0x100000000ULL);
start = align_down(memstress_args.gpa, PG_SIZE_1G);
end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
- tdp_identity_map_1g(vmx, vm, start, end - start);
+ tdp_identity_map_1g(vm, start, end - start);
}
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
{
- struct vmx_pages *vmx;
struct kvm_regs regs;
vm_vaddr_t vmx_gva;
int vcpu_id;
vm_enable_ept(vm);
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
- vmx = vcpu_alloc_vmx(vm, &vmx_gva);
+ vcpu_alloc_vmx(vm, &vmx_gva);
/* The EPTs are shared across vCPUs, so set up the mappings once */
if (vcpu_id == 0)
- memstress_setup_ept_mappings(vmx, vm);
+ memstress_setup_ept_mappings(vm);
/*
* Override the vCPU to run memstress_l1_guest_code() which will
}
-void __tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr, int target_level)
+void __tdp_pg_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
+ int target_level)
{
const uint64_t page_size = PG_LEVEL_SIZE(target_level);
void *eptp_hva = addr_gpa2hva(vm, vm->arch.tdp_mmu->pgd);
}
}
-void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr)
-{
- __tdp_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
-}
-
/*
* Map a range of EPT guest physical addresses to the VM's physical address
*
* Within the VM given by vm, creates a nested guest translation for the
* page range starting at nested_paddr to the page range starting at paddr.
*/
-void __tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr, uint64_t size,
- int level)
+void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
+ uint64_t size, int level)
{
size_t page_size = PG_LEVEL_SIZE(level);
size_t npages = size / page_size;
TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
while (npages--) {
- __tdp_pg_map(vmx, vm, nested_paddr, paddr, level);
+ __tdp_pg_map(vm, nested_paddr, paddr, level);
nested_paddr += page_size;
paddr += page_size;
}
}
-void tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr, uint64_t size)
+void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
+ uint64_t size)
{
- __tdp_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
+ __tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
}
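As a rough usage sketch (not part of the patch itself): with the vmx argument gone, a caller only needs the kvm_vm handle to populate the shared TDP tables. The helper name and GPA constants below are hypothetical, and the selftests' kvm_util.h/vmx.h declarations are assumed.

/*
 * Illustrative sketch only: map a hypothetical 2 MiB window of L2 GPAs onto
 * an equally sized range of L1 GPAs with 4K mappings via the vmx-free API.
 * Assumes the VM was prepared with vm_enable_ept() and that both ranges are
 * backed by memslots.
 */
static void map_l2_window(struct kvm_vm *vm)
{
	const uint64_t l2_gpa = 0x40000000;	/* hypothetical L2 GPA */
	const uint64_t l1_gpa = 0x80000000;	/* hypothetical L1 GPA */

	tdp_map(vm, l2_gpa, l1_gpa, 0x200000);
}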
/* Prepare an identity extended page table that maps all the
* physical pages in VM.
*/
-void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
- struct kvm_vm *vm)
+void tdp_identity_map_default_memslots(struct kvm_vm *vm)
{
uint32_t s, memslot = 0;
sparsebit_idx_t i, last;
if (i > last)
break;
- tdp_map(vmx, vm, (uint64_t)i << vm->page_shift,
+ tdp_map(vm, (uint64_t)i << vm->page_shift,
(uint64_t)i << vm->page_shift, 1 << vm->page_shift);
}
}
/* Identity map a region with 1GiB Pages. */
-void tdp_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t addr, uint64_t size)
+void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size)
{
- __tdp_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
+ __tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
}
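And a hedged sketch of the 1 GiB identity-map helper under its new signature, mirroring the alignment done in memstress_setup_ept_mappings() above (the wrapper name is made up for illustration):

/*
 * Illustrative sketch only: identity-map an arbitrary GPA range with 1 GiB
 * pages.  Since __tdp_map() simply maps size / page_size whole pages of the
 * target level, the range is widened to 1 GiB boundaries first.
 */
static void identity_map_1g_aligned(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
{
	uint64_t start = align_down(gpa, PG_SIZE_1G);
	uint64_t end = align_up(gpa + size, PG_SIZE_1G);

	tdp_identity_map_1g(vm, start, end - start);
}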
bool kvm_cpu_has_ept(void)
static void test_vmx_dirty_log(bool enable_ept)
{
vm_vaddr_t vmx_pages_gva = 0;
- struct vmx_pages *vmx;
unsigned long *bmap;
uint64_t *host_test_mem;
if (enable_ept)
vm_enable_ept(vm);
- vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
/* Add an extra memory slot for testing dirty logging */
* GPAs as the EPT enabled case.
*/
if (enable_ept) {
- tdp_identity_map_default_memslots(vmx, vm);
- tdp_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
- tdp_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
+ tdp_identity_map_default_memslots(vm);
+ tdp_map(vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
+ tdp_map(vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
}
bmap = bitmap_zalloc(TEST_MEM_PAGES);