// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/vmx.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
#include "vmx.h"

#define PAGE_SHIFT_4K  12

#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000

bool enable_evmcs;
struct eptPageTableEntry {
	uint64_t readable:1;
	uint64_t writable:1;
	uint64_t executable:1;
	uint64_t memory_type:3;
	uint64_t ignore_pat:1;
	uint64_t page_size:1;
	uint64_t accessed:1;
	uint64_t dirty:1;
	uint64_t ignored_11_10:2;
	uint64_t address:40;
	uint64_t ignored_62_52:11;
	uint64_t suppress_ve:1;
};
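/*
 * EPT pointer (EPTP) layout, per the Intel SDM: memory type and page-walk
 * length in the low bits, AD-bit enable in bit 6, and the physical page
 * frame number of the PML4 table in bits 51:12.
 */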
struct eptPageTablePointer {
	uint64_t memory_type:3;
	uint64_t page_walk_length:3;
	uint64_t ad_enabled:1;
	uint64_t reserved_11_07:5;
	uint64_t address:40;
	uint64_t reserved_63_52:12;
};
int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
{
	uint16_t evmcs_ver;
	struct kvm_enable_cap enable_evmcs_cap = {
		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
		 .args[0] = (unsigned long)&evmcs_ver
	};

	vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
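	/*
	 * KVM fills in evmcs_ver: the low byte is the minimum supported
	 * eVMCS version, the high byte the maximum.
	 */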
	/* KVM should return supported EVMCS version range */
	TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
		    (evmcs_ver & 0xff) > 0,
		    "Incorrect EVMCS version range: %x:%x\n",
		    evmcs_ver & 0xff, evmcs_ver >> 8);

	return evmcs_ver;
}
/*
 * Allocate memory regions for nested VMX tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_vmx_gva - The guest virtual address for the struct vmx_pages.
 *
 * Return:
 *   Pointer to structure with the addresses of the VMX areas.
 */
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
	vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
	/* Setup of a region of guest memory for the vmxon region. */
	vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
	vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
	/* Setup of a region of guest memory for a vmcs. */
	vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
	vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
	/* Setup of a region of guest memory for the MSR bitmap. */
	vmx->msr = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
	vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
	memset(vmx->msr_hva, 0, getpagesize());
	/* Setup of a region of guest memory for the shadow VMCS. */
	vmx->shadow_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
	vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);
	/* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
	vmx->vmread = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
	vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
	memset(vmx->vmread_hva, 0, getpagesize());

	vmx->vmwrite = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
	vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
	memset(vmx->vmwrite_hva, 0, getpagesize());
	/* Setup of a region of guest memory for the VP Assist page. */
	vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(),
						0x10000, 0, 0);
	vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
	vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);
	/* Setup of a region of guest memory for the enlightened VMCS. */
	vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(),
						       0x10000, 0, 0);
	vmx->enlightened_vmcs_hva =
		addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
	vmx->enlightened_vmcs_gpa =
		addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);
	*p_vmx_gva = vmx_gva;
	return vmx;
}
bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
	uint64_t feature_control;
	uint64_t required;
	unsigned long cr0, cr4;

	/*
	 * Ensure bits in CR0 and CR4 are valid in VMX operation:
	 * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
	 * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
	 */
	__asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
	cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
	cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
	__asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");

	__asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
	cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);
	cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);
	/* Enable VMX operation */
	cr4 |= X86_CR4_VMXE;
	__asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");

	/*
	 * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
	 *  Bit 0: Lock bit. If clear, VMXON causes a #GP.
	 *  Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
	 *    outside of SMX causes a #GP.
	 */
	required = FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
	required |= FEAT_CTL_LOCKED;
	feature_control = rdmsr(MSR_IA32_FEAT_CTL);
	if ((feature_control & required) != required)
		wrmsr(MSR_IA32_FEAT_CTL, feature_control | required);
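
	/*
	 * Per the Intel SDM, the VMXON region, like a VMCS, must begin
	 * with the processor's VMCS revision identifier.
	 */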
	/* Enter VMX root operation. */
	*(uint32_t *)(vmx->vmxon) = vmcs_revision();
	if (vmxon(vmx->vmxon_gpa))
		return false;

	return true;
}
bool load_vmcs(struct vmx_pages *vmx)
{
	if (!enable_evmcs) {
		/* Load a VMCS. */
		*(uint32_t *)(vmx->vmcs) = vmcs_revision();
		if (vmclear(vmx->vmcs_gpa))
			return false;

		if (vmptrld(vmx->vmcs_gpa))
			return false;
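
		/*
		 * Bit 31 of the VMCS revision identifier distinguishes a
		 * shadow VMCS from an ordinary one.
		 */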
		/* Setup shadow VMCS, do not load it yet. */
		*(uint32_t *)(vmx->shadow_vmcs) =
			vmcs_revision() | 0x80000000ul;
		if (vmclear(vmx->shadow_vmcs_gpa))
			return false;
	} else {
		if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
				  vmx->enlightened_vmcs))
			return false;
		current_evmcs->revision_id = EVMCS_VERSION;
	}

	return true;
}
/*
 * Initialize the control fields to the most basic settings possible.
 */
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
	uint32_t sec_exec_ctl = 0;
	vmwrite(VIRTUAL_PROCESSOR_ID, 0);
	vmwrite(POSTED_INTR_NV, 0);

	vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
	if (vmx->eptp_gpa) {
		uint64_t ept_paddr;
		struct eptPageTablePointer eptp = {
			.memory_type = VMX_BASIC_MEM_TYPE_WB,
			.page_walk_length = 3, /* + 1 */
			.ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS),
			.address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
		};

		memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
		vmwrite(EPT_POINTER, ept_paddr);
		sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
	}
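
	/*
	 * vmwrite() returns non-zero on failure; if the secondary controls
	 * field is unsupported, fall back to the primary controls alone,
	 * which is valid only when no secondary control (e.g. EPT) was
	 * requested above.
	 */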
	if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
		vmwrite(CPU_BASED_VM_EXEC_CONTROL,
			rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
	else {
		vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
		GUEST_ASSERT(!sec_exec_ctl);
	}
	vmwrite(EXCEPTION_BITMAP, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
	vmwrite(CR3_TARGET_COUNT, 0);
	vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
		VM_EXIT_HOST_ADDR_SPACE_SIZE);	  /* 64-bit host */
	vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
	vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
		VM_ENTRY_IA32E_MODE);		  /* 64-bit guest */
	vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
	vmwrite(TPR_THRESHOLD, 0);
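
	/*
	 * A guest/host mask of 0 gives the L2 guest ownership of every CR0
	 * and CR4 bit; the read shadows then simply mirror the current
	 * values.
	 */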
	vmwrite(CR0_GUEST_HOST_MASK, 0);
	vmwrite(CR4_GUEST_HOST_MASK, 0);
	vmwrite(CR0_READ_SHADOW, get_cr0());
	vmwrite(CR4_READ_SHADOW, get_cr4());

	vmwrite(MSR_BITMAP, vmx->msr_gpa);
	vmwrite(VMREAD_BITMAP, vmx->vmread_gpa);
	vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa);
}
/*
 * Initialize the host state fields based on the current host state, with
 * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
 * or vmresume.
 */
static inline void init_vmcs_host_state(void)
{
	uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);

	vmwrite(HOST_ES_SELECTOR, get_es());
	vmwrite(HOST_CS_SELECTOR, get_cs());
	vmwrite(HOST_SS_SELECTOR, get_ss());
	vmwrite(HOST_DS_SELECTOR, get_ds());
	vmwrite(HOST_FS_SELECTOR, get_fs());
	vmwrite(HOST_GS_SELECTOR, get_gs());
	vmwrite(HOST_TR_SELECTOR, get_tr());
	if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
		vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
	if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
	if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
			rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));
	vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));

	vmwrite(HOST_CR0, get_cr0());
	vmwrite(HOST_CR3, get_cr3());
	vmwrite(HOST_CR4, get_cr4());
	vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
	vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
	vmwrite(HOST_TR_BASE,
		get_desc64_base((struct desc64 *)(get_gdt().address + get_tr())));
	vmwrite(HOST_GDTR_BASE, get_gdt().address);
	vmwrite(HOST_IDTR_BASE, get_idt().address);
	vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
	vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}
/*
 * Initialize the guest state fields essentially as a clone of
 * the host state fields. Some host state fields have fixed
 * values, and we set the corresponding guest state fields accordingly.
 */
static inline void init_vmcs_guest_state(void *rip, void *rsp)
{
	vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
	vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
	vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
	vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
	vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
	vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
	vmwrite(GUEST_LDTR_SELECTOR, 0);
	vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
	vmwrite(GUEST_INTR_STATUS, 0);
	vmwrite(GUEST_PML_INDEX, 0);
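
	/* A link pointer of all 1s means no shadow VMCS is linked. */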
	vmwrite(VMCS_LINK_POINTER, -1ll);
	vmwrite(GUEST_IA32_DEBUGCTL, 0);
	vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
	vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
	vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
		vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));
	vmwrite(GUEST_ES_LIMIT, -1);
	vmwrite(GUEST_CS_LIMIT, -1);
	vmwrite(GUEST_SS_LIMIT, -1);
	vmwrite(GUEST_DS_LIMIT, -1);
	vmwrite(GUEST_FS_LIMIT, -1);
	vmwrite(GUEST_GS_LIMIT, -1);
	vmwrite(GUEST_LDTR_LIMIT, -1);
	vmwrite(GUEST_TR_LIMIT, 0x67);
	vmwrite(GUEST_GDTR_LIMIT, 0xffff);
	vmwrite(GUEST_IDTR_LIMIT, 0xffff);
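	/*
	 * Access rights: 0x10000 sets the "unusable" bit, 0xc093 is a
	 * present read/write data segment (G=1, D/B=1), 0xa09b is a
	 * present 64-bit code segment (L=1, G=1), and 0x8b is a busy
	 * 64-bit TSS.
	 */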
	vmwrite(GUEST_ES_AR_BYTES,
		vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
	vmwrite(GUEST_SS_AR_BYTES, 0xc093);
	vmwrite(GUEST_DS_AR_BYTES,
		vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_FS_AR_BYTES,
		vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_GS_AR_BYTES,
		vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);
	vmwrite(GUEST_TR_AR_BYTES, 0x8b);
	vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmwrite(GUEST_ACTIVITY_STATE, 0);
	vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
	vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);
	vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
	vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
	vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
	vmwrite(GUEST_ES_BASE, 0);
	vmwrite(GUEST_CS_BASE, 0);
	vmwrite(GUEST_SS_BASE, 0);
	vmwrite(GUEST_DS_BASE, 0);
	vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
	vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
	vmwrite(GUEST_LDTR_BASE, 0);
	vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
	vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
	vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
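	/*
	 * 0x400 is DR7's architectural reset value; RFLAGS bit 1 is a
	 * reserved bit that must be set.
	 */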
	vmwrite(GUEST_DR7, 0x400);
	vmwrite(GUEST_RSP, (uint64_t)rsp);
	vmwrite(GUEST_RIP, (uint64_t)rip);
	vmwrite(GUEST_RFLAGS, 2);
	vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
	vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
	vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
{
	init_vmcs_control_fields(vmx);
	init_vmcs_host_state();
	init_vmcs_guest_state(guest_rip, guest_rsp);
}
void nested_vmx_check_supported(void)
{
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	if (!(entry->ecx & CPUID_VMX)) {
		print_skip("nested VMX not enabled");
		exit(KSFT_SKIP);
	}
}
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
{
	uint16_t index[4];
	struct eptPageTableEntry *pml4e;
	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	TEST_ASSERT((nested_paddr % vm->page_size) == 0,
		    "Nested physical address not on page boundary,\n"
		    "  nested_paddr: 0x%lx vm->page_size: 0x%x",
		    nested_paddr, vm->page_size);
	TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
		    "Nested physical address beyond maximum supported,\n"
		    "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    nested_paddr, vm->max_gfn, vm->page_size);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		    "Physical address not on page boundary,\n"
		    "  paddr: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->max_gfn, vm->page_size);
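
	/*
	 * Each EPT paging structure indexes 512 entries with 9 bits of the
	 * nested guest physical address: PTE (bits 20:12), PDE (29:21),
	 * PDPTE (38:30) and PML4E (47:39).
	 */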
	index[0] = (nested_paddr >> 12) & 0x1ffu;
	index[1] = (nested_paddr >> 21) & 0x1ffu;
	index[2] = (nested_paddr >> 30) & 0x1ffu;
	index[3] = (nested_paddr >> 39) & 0x1ffu;
	/* Allocate page directory pointer table if not present. */
	pml4e = vmx->eptp_hva;
	if (!pml4e[index[3]].readable) {
		pml4e[index[3]].address = vm_phy_page_alloc(vm,
			KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
			>> vm->page_shift;
		pml4e[index[3]].writable = true;
		pml4e[index[3]].readable = true;
		pml4e[index[3]].executable = true;
	}
	/* Allocate page directory table if not present. */
	struct eptPageTableEntry *pdpe;
	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
	if (!pdpe[index[2]].readable) {
		pdpe[index[2]].address = vm_phy_page_alloc(vm,
			KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
			>> vm->page_shift;
		pdpe[index[2]].writable = true;
		pdpe[index[2]].readable = true;
		pdpe[index[2]].executable = true;
	}
	/* Allocate page table if not present. */
	struct eptPageTableEntry *pde;
	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
	if (!pde[index[1]].readable) {
		pde[index[1]].address = vm_phy_page_alloc(vm,
			KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
			>> vm->page_shift;
		pde[index[1]].writable = true;
		pde[index[1]].readable = true;
		pde[index[1]].executable = true;
	}
	/* Fill in page table entry. */
	struct eptPageTableEntry *pte;
	pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
	pte[index[0]].address = paddr >> vm->page_shift;
	pte[index[0]].writable = true;
	pte[index[0]].readable = true;
	pte[index[0]].executable = true;
	/*
	 * For now mark these as accessed and dirty because the only
	 * testcase we have needs that. Can be reconsidered later.
	 */
	pte[index[0]].accessed = true;
	pte[index[0]].dirty = true;
}
/*
 * Map a range of EPT guest physical addresses to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   nested_paddr - Nested guest physical address to map
 *   paddr - VM Physical Address
 *   size - The size of the range to map
 *   eptp_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a nested guest translation for the
 * page range starting at nested_paddr to the page range starting at paddr.
 */
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		uint64_t nested_paddr, uint64_t paddr, uint64_t size,
		uint32_t eptp_memslot)
{
	size_t page_size = vm->page_size;
	size_t npages = size / page_size;

	TEST_ASSERT(nested_paddr + size > nested_paddr, "Nested paddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
		nested_paddr += page_size;
		paddr += page_size;
	}
}
/* Prepare an identity extended page table that maps all the
 * physical pages in VM.
 */
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
			uint32_t memslot, uint32_t eptp_memslot)
{
	sparsebit_idx_t i, last;
	struct userspace_mem_region *region =
		memslot2region(vm, memslot);

	i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
	last = i + (region->region.memory_size >> vm->page_shift);
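
	/*
	 * Identity-map only the pages the slot has actually handed out:
	 * allocated pages are the ones *cleared* in the unused_phy_pages
	 * sparsebit.
	 */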
	for (;;) {
		i = sparsebit_next_clear(region->unused_phy_pages, i);
		if (i > last)
			break;

		nested_map(vmx, vm,
			   (uint64_t)i << vm->page_shift,
			   (uint64_t)i << vm->page_shift,
			   1 << vm->page_shift,
			   eptp_memslot);
	}
}
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
		  uint32_t eptp_memslot)
{
	vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
	vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
}