1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util.h
 *
 * Copyright (C) 2018, Google LLC.
 */
7 #ifndef SELFTEST_KVM_UTIL_H
8 #define SELFTEST_KVM_UTIL_H
#include "test_util.h"

#include "linux/kvm.h"
#include <sys/ioctl.h>

#include "sparsebit.h"
/*
 * Callers of kvm_util only have an incomplete/opaque description of the
 * structure kvm_util is using to maintain the state of a VM.
 */
/* Guest physical and virtual address types used throughout kvm_util. */
typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000

/* Defaults used by vm_create_default()/vm_vcpu_add_default(). */
#define DEFAULT_GUEST_PHY_PAGES		512
#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5
42 VM_MODE_PXXV48_4K
, /* For 48bits VA but ANY bits PA */
/*
 * Per-architecture default guest mode.  NOTE(review): the #else/#endif lines
 * were lost in this copy; restored so the conditional is balanced.
 */
#if defined(__aarch64__)
#define VM_MODE_DEFAULT		VM_MODE_P40V48_4K
#elif defined(__x86_64__)
#define VM_MODE_DEFAULT		VM_MODE_PXXV48_4K
#else
#define VM_MODE_DEFAULT		VM_MODE_P52V48_4K
#endif
/* Human-readable name for a vm_guest_mode value (table defined in kvm_util.c). */
#define vm_guest_mode_string(m) vm_guest_mode_string[m]
extern const char * const vm_guest_mode_string[];
/*
 * Backing-store type for a guest memory region.
 * NOTE(review): the plain VM_MEM_SRC_ANONYMOUS enumerator and the closing
 * brace were lost in this copy; restored per upstream kvm_util.h.
 */
enum vm_mem_backing_src_type {
	VM_MEM_SRC_ANONYMOUS,
	VM_MEM_SRC_ANONYMOUS_THP,
	VM_MEM_SRC_ANONYMOUS_HUGETLB,
};
/* Capability queries and VM lifecycle (implemented in lib/kvm_util.c). */
int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);

struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp, int perm);
void kvm_vm_release(struct kvm_vm *vmp);

/* Dirty-page log retrieval/clearing for memslot @slot. */
void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
			    uint64_t first_page, uint32_t num_pages);
75 int kvm_memcmp_hva_gva(void *hva
, struct kvm_vm
*vm
, const vm_vaddr_t gva
,
/* Load an ELF image into guest memory; dump VM state to @stream. */
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
		     uint32_t data_memslot, uint32_t pgd_memslot);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
/*
 * stream - Output FILE stream
 * vm - Virtual Machine
 * indent - Left margin indent amount
 *
 * Dumps the current state of the VCPU specified by @vcpuid, within the VM
 * given by @vm, to the FILE stream given by @stream.
 */
/*
 * Dump vCPU @vcpuid state to @stream.  NOTE(review): the trailing
 * "uint8_t indent);" line was lost in this copy; restored per upstream.
 */
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
	       uint8_t indent);

void vm_create_irqchip(struct kvm_vm *vm);
/*
 * Add a userspace-backed guest memory region.  NOTE(review): the trailing
 * "uint32_t flags);" line was lost in this copy; restored per upstream.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 uint64_t guest_paddr, uint32_t slot,
				 uint64_t npages, uint32_t flags);
/*
 * ioctl() on a vCPU fd; vcpu_ioctl() asserts success, _vcpu_ioctl() returns
 * the raw result.  NOTE(review): the trailing "void *arg);" lines were lost
 * in this copy; restored per upstream.
 */
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
		void *arg);
int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
		void *arg);
113 void vm_ioctl(struct kvm_vm
*vm
, unsigned long ioctl
, void *arg
);
114 void vm_mem_region_set_flags(struct kvm_vm
*vm
, uint32_t slot
, uint32_t flags
);
115 void vm_mem_region_move(struct kvm_vm
*vm
, uint32_t slot
, uint64_t new_gpa
);
116 void vm_vcpu_add(struct kvm_vm
*vm
, uint32_t vcpuid
);
117 vm_vaddr_t
vm_vaddr_alloc(struct kvm_vm
*vm
, size_t sz
, vm_vaddr_t vaddr_min
,
118 uint32_t data_memslot
, uint32_t pgd_memslot
);
119 void virt_map(struct kvm_vm
*vm
, uint64_t vaddr
, uint64_t paddr
,
120 unsigned int npages
, uint32_t pgd_memslot
);
121 void *addr_gpa2hva(struct kvm_vm
*vm
, vm_paddr_t gpa
);
122 void *addr_gva2hva(struct kvm_vm
*vm
, vm_vaddr_t gva
);
123 vm_paddr_t
addr_hva2gpa(struct kvm_vm
*vm
, void *hva
);
/*
 * Address Guest Virtual to Guest Physical
 *
 * vm - Virtual Machine
 * gva - VM virtual address
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
140 vm_paddr_t
addr_gva2gpa(struct kvm_vm
*vm
, vm_vaddr_t gva
);
142 struct kvm_run
*vcpu_state(struct kvm_vm
*vm
, uint32_t vcpuid
);
143 void vcpu_run(struct kvm_vm
*vm
, uint32_t vcpuid
);
144 int _vcpu_run(struct kvm_vm
*vm
, uint32_t vcpuid
);
145 void vcpu_run_complete_io(struct kvm_vm
*vm
, uint32_t vcpuid
);
146 void vcpu_set_guest_debug(struct kvm_vm
*vm
, uint32_t vcpuid
,
147 struct kvm_guest_debug
*debug
);
148 void vcpu_set_mp_state(struct kvm_vm
*vm
, uint32_t vcpuid
,
149 struct kvm_mp_state
*mp_state
);
150 void vcpu_regs_get(struct kvm_vm
*vm
, uint32_t vcpuid
, struct kvm_regs
*regs
);
151 void vcpu_regs_set(struct kvm_vm
*vm
, uint32_t vcpuid
, struct kvm_regs
*regs
);
/*
 * vm - Virtual Machine
 * num - number of arguments
 * ... - arguments, each of type uint64_t
 *
 * Sets the first @num function input registers of the VCPU with @vcpuid,
 * per the C calling convention of the architecture, to the values given
 * as variable args. Each of the variable args is expected to be of type
 * uint64_t. The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);

/* Special/FPU register access; _vcpu_sregs_set() returns the raw result. */
void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_sregs *sregs);
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_sregs *sregs);
int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_sregs *sregs);
void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid,
		  struct kvm_fpu *fpu);
void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid,
		  struct kvm_fpu *fpu);
void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
/*
 * Optional vCPU event / nested-state access.  NOTE(review): the #endif after
 * the events block and the #ifdef __x86_64__ guard around the nested-state
 * declarations were lost in this copy; restored per upstream kvm_util.h.
 */
#ifdef __KVM_HAVE_VCPU_EVENTS
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events);
#endif
#ifdef __x86_64__
void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
			   struct kvm_nested_state *state);
int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_nested_state *state, bool ignore_error);
#endif

/* Human-readable name for a KVM_EXIT_* reason. */
const char *exit_reason_str(unsigned int exit_reason);

/* Allocate guest page tables (arch-specific implementation). */
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
/*
 * VM Virtual Page Map
 *
 * vm - Virtual Machine
 * vaddr - VM Virtual Address
 * paddr - VM Physical Address
 * memslot - Memory region slot for new virtual translation tables
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
218 void virt_pg_map(struct kvm_vm
*vm
, uint64_t vaddr
, uint64_t paddr
,
221 vm_paddr_t
vm_phy_page_alloc(struct kvm_vm
*vm
, vm_paddr_t paddr_min
,
223 vm_paddr_t
vm_phy_pages_alloc(struct kvm_vm
*vm
, size_t num
,
224 vm_paddr_t paddr_min
, uint32_t memslot
);
/*
 * Create a VM with reasonable defaults
 *
 * vcpuid - The id of the single VCPU to add to the VM.
 * extra_mem_pages - The number of extra pages to add (this will
 *                   decide how much extra space we will need to
 *                   setup the page tables using memslot 0)
 * guest_code - The vCPU's entry point
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 */
/*
 * NOTE(review): the trailing "void *guest_code);" line was lost in this
 * copy; restored per upstream kvm_util.h.
 */
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code);
/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * vm - Virtual Machine
 * vcpuid - The id of the VCPU to add to the VM.
 * guest_code - The vCPU's entry point
 */
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);

bool vm_is_unrestricted_guest(struct kvm_vm *vm);

/* Simple accessors for properties of @vm. */
unsigned int vm_get_page_size(struct kvm_vm *vm);
unsigned int vm_get_page_shift(struct kvm_vm *vm);
unsigned int vm_get_max_gfn(struct kvm_vm *vm);

/* Conversions between guest-page and host-page counts for @mode. */
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode,
			       unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode,
				unsigned int num_host_pages);
263 static inline unsigned int
264 vm_adjust_num_guest_pages(enum vm_guest_mode mode
, unsigned int num_guest_pages
)
267 n
= vm_num_guest_pages(mode
, vm_num_host_pages(mode
, num_guest_pages
));
269 /* s390 requires 1M aligned guest sizes */
270 n
= (n
+ 255) & ~255;
/*
 * Memslot lookup / dirty-log allocation / device creation helpers.
 * NOTE(review): the trailing "uint64_t end);" line of
 * kvm_userspace_memory_region_find() was lost in this copy; restored per
 * upstream kvm_util.h.
 */
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end);

struct kvm_dirty_log *
allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);

int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
/*
 * Copy a global's value between the host and its guest-side mapping.
 * NOTE(review): the closing "})" of both statement-expression macros was
 * lost in this copy; restored.
 */
#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})
/*
 * Guest/host communication via ucall.  NOTE(review): the ucall command enum,
 * the struct ucall opener, its "cmd" field and closing brace were lost in
 * this copy; restored per upstream kvm_util.h.
 */
enum {
	UCALL_NONE,
	UCALL_SYNC,
	UCALL_ABORT,
	UCALL_DONE,
};

#define UCALL_MAX_ARGS 6

struct ucall {
	uint64_t cmd;			/* one of the UCALL_* commands */
	uint64_t args[UCALL_MAX_ARGS];	/* command-specific payload */
};
/* ucall transport setup/teardown, guest-side issue and host-side retrieval. */
void ucall_init(struct kvm_vm *vm, void *arg);
void ucall_uninit(struct kvm_vm *vm);
void ucall(uint64_t cmd, int nargs, ...);
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
/* Guest-side helpers: report a sync point / completion to the host. */
#define GUEST_SYNC(stage)	ucall(UCALL_SYNC, 2, "hello", stage)
#define GUEST_DONE()		ucall(UCALL_DONE, 0)
/*
 * Guest-side assert: on failure, report the stringized condition and line
 * number to the host via UCALL_ABORT.  NOTE(review): the "if (!(_condition))"
 * line and the "} while (0)" terminator were lost in this copy; restored per
 * upstream kvm_util.h.
 */
#define GUEST_ASSERT(_condition) do { \
	if (!(_condition)) \
		ucall(UCALL_ABORT, 2, \
		      "Failed guest assert: " \
		      #_condition, __LINE__); \
} while (0)
323 #endif /* SELFTEST_KVM_UTIL_H */