]>
Commit | Line | Data |
---|---|---|
7a338472 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
783e9e51 PB |
2 | /* |
3 | * tools/testing/selftests/kvm/lib/kvm_util.c | |
4 | * | |
5 | * Copyright (C) 2018, Google LLC. | |
783e9e51 PB |
6 | */ |
7 | ||
8 | #include "test_util.h" | |
9 | #include "kvm_util.h" | |
10 | #include "kvm_util_internal.h" | |
567a9f1e | 11 | #include "processor.h" |
783e9e51 PB |
12 | |
13 | #include <assert.h> | |
14 | #include <sys/mman.h> | |
15 | #include <sys/types.h> | |
16 | #include <sys/stat.h> | |
bc8eb2fe | 17 | #include <linux/kernel.h> |
783e9e51 | 18 | |
783e9e51 | 19 | #define KVM_UTIL_PGS_PER_HUGEPG 512 |
81d1cca0 | 20 | #define KVM_UTIL_MIN_PFN 2 |
783e9e51 PB |
21 | |
22 | /* Aligns x up to the next multiple of size. Size must be a power of 2. */ | |
23 | static void *align(void *x, size_t size) | |
24 | { | |
25 | size_t mask = size - 1; | |
26 | TEST_ASSERT(size != 0 && !(size & (size - 1)), | |
27 | "size not a power of 2: %lu", size); | |
28 | return (void *) (((size_t) x + mask) & ~mask); | |
29 | } | |
30 | ||
eabe7881 AJ |
31 | /* |
32 | * Capability | |
783e9e51 PB |
33 | * |
34 | * Input Args: | |
35 | * cap - Capability | |
36 | * | |
37 | * Output Args: None | |
38 | * | |
39 | * Return: | |
40 | * On success, the Value corresponding to the capability (KVM_CAP_*) | |
41 | * specified by the value of cap. On failure a TEST_ASSERT failure | |
42 | * is produced. | |
43 | * | |
44 | * Looks up and returns the value corresponding to the capability | |
45 | * (KVM_CAP_*) given by cap. | |
46 | */ | |
47 | int kvm_check_cap(long cap) | |
48 | { | |
49 | int ret; | |
50 | int kvm_fd; | |
51 | ||
52 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); | |
bcb2b94a PB |
53 | if (kvm_fd < 0) |
54 | exit(KSFT_SKIP); | |
783e9e51 PB |
55 | |
56 | ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); | |
57 | TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n" | |
58 | " rc: %i errno: %i", ret, errno); | |
59 | ||
60 | close(kvm_fd); | |
61 | ||
62 | return ret; | |
63 | } | |
64 | ||
8b56ee91 DS |
65 | /* VM Enable Capability |
66 | * | |
67 | * Input Args: | |
68 | * vm - Virtual Machine | |
69 | * cap - Capability | |
70 | * | |
71 | * Output Args: None | |
72 | * | |
73 | * Return: On success, 0. On failure a TEST_ASSERT failure is produced. | |
74 | * | |
75 | * Enables a capability (KVM_CAP_*) on the VM. | |
76 | */ | |
77 | int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) | |
78 | { | |
79 | int ret; | |
80 | ||
81 | ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); | |
82 | TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n" | |
83 | " rc: %i errno: %i", ret, errno); | |
84 | ||
85 | return ret; | |
86 | } | |
87 | ||
/*
 * Opens /dev/kvm with the permissions given by perm and creates the
 * underlying VM fd (KVM_CREATE_VM) using vm->type.  Skips the whole
 * test (KSFT_SKIP) if /dev/kvm is unavailable or the kernel lacks
 * KVM_CAP_IMMEDIATE_EXIT, which the selftest harness relies on.
 */
static void vm_open(struct kvm_vm *vm, int perm)
{
	vm->kvm_fd = open(KVM_DEV_PATH, perm);
	if (vm->kvm_fd < 0)
		exit(KSFT_SKIP);	/* no KVM on this host: skip, don't fail */

	if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
		print_skip("immediate_exit not available");
		exit(KSFT_SKIP);
	}

	/* vm->type carries e.g. the arm64 IPA size (see _vm_create); 0 = default. */
	vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
	TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
		"rc: %i errno: %i", vm->fd, errno);
}
103 | ||
/* Human-readable names, indexed by enum vm_guest_mode. */
const char * const vm_guest_mode_string[] = {
	"PA-bits:52, VA-bits:48, 4K pages",
	"PA-bits:52, VA-bits:48, 64K pages",
	"PA-bits:48, VA-bits:48, 4K pages",
	"PA-bits:48, VA-bits:48, 64K pages",
	"PA-bits:40, VA-bits:48, 4K pages",
	"PA-bits:40, VA-bits:48, 64K pages",
	"PA-bits:ANY, VA-bits:48, 4K pages",
};
/* Keep this table in lock-step with enum vm_guest_mode. */
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
	       "Missing new mode strings?");
81d1cca0 | 115 | |
/* Per-mode address-space geometry, indexed by enum vm_guest_mode. */
struct vm_guest_mode_params {
	unsigned int pa_bits;	/* guest physical address width */
	unsigned int va_bits;	/* guest virtual address width */
	unsigned int page_size;	/* guest page size in bytes */
	unsigned int page_shift;	/* log2(page_size) */
};

static const struct vm_guest_mode_params vm_guest_mode_params[] = {
	{ 52, 48, 0x1000, 12 },
	{ 52, 48, 0x10000, 16 },
	{ 48, 48, 0x1000, 12 },
	{ 48, 48, 0x10000, 16 },
	{ 40, 48, 0x1000, 12 },
	{ 40, 48, 0x10000, 16 },
	{ 0, 0, 0x1000, 12 },	/* PXXV48_4K: pa/va bits probed at runtime */
};
/* Keep this table in lock-step with enum vm_guest_mode. */
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");
134 | ||
/*
 * VM Create
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   phy_pages - Physical memory pages
 *   perm - permission
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * When phy_pages is non-zero, a memory region of phy_pages physical pages
 * is created and mapped starting at guest physical address 0.  The file
 * descriptor to control the created VM is created with the permissions
 * given by perm (e.g. O_RDWR).
 */
struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	struct kvm_vm *vm;

	pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
		 vm_guest_mode_string(mode), phy_pages, perm);

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	vm->mode = mode;
	vm->type = 0;

	/* Pull the address-space geometry from the per-mode table. */
	vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[mode].va_bits;
	vm->page_size = vm_guest_mode_params[mode].page_size;
	vm->page_shift = vm_guest_mode_params[mode].page_shift;

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		/* PXXV48 probes the host CPU for the PA width at runtime. */
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		TEST_ASSERT(vm->va_bits == 48, "Linear address width "
			    "(%d bits) not supported", vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
	}

#ifdef __aarch64__
	/* Encode a non-default IPA size into the KVM_CREATE_VM type arg. */
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm, perm);

	/*
	 * Limit to VA-bit canonical virtual addresses: the low half
	 * [0, 2^(va_bits-1)) and the sign-extended high half.
	 */
	vm->vpages_valid = sparsebit_alloc();
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();
	if (phy_pages != 0)
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    0, 0, phy_pages, 0);

	return vm;
}
234 | ||
/* Public wrapper around _vm_create with no extra behavior. */
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	return _vm_create(mode, phy_pages, perm);
}
239 | ||
/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *   perm - permission
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated to the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
	struct userspace_mem_region *region;

	vm_open(vmp, perm);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	/* Re-register every tracked memory region with the fresh VM fd. */
	for (region = vmp->userspace_mem_region_head; region;
	     region = region->next) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}
274 | ||
3b4cd0ff PX |
275 | void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) |
276 | { | |
277 | struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot }; | |
278 | int ret; | |
279 | ||
280 | ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args); | |
281 | TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s", | |
d9eaf19e | 282 | __func__, strerror(-ret)); |
3b4cd0ff PX |
283 | } |
284 | ||
2a31b9db PB |
285 | void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, |
286 | uint64_t first_page, uint32_t num_pages) | |
287 | { | |
288 | struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot, | |
289 | .first_page = first_page, | |
290 | .num_pages = num_pages }; | |
291 | int ret; | |
292 | ||
293 | ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args); | |
294 | TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s", | |
d9eaf19e | 295 | __func__, strerror(-ret)); |
2a31b9db PB |
296 | } |
297 | ||
eabe7881 AJ |
298 | /* |
299 | * Userspace Memory Region Find | |
783e9e51 PB |
300 | * |
301 | * Input Args: | |
302 | * vm - Virtual Machine | |
303 | * start - Starting VM physical address | |
304 | * end - Ending VM physical address, inclusive. | |
305 | * | |
306 | * Output Args: None | |
307 | * | |
308 | * Return: | |
309 | * Pointer to overlapping region, NULL if no such region. | |
310 | * | |
311 | * Searches for a region with any physical memory that overlaps with | |
312 | * any portion of the guest physical addresses from start to end | |
313 | * inclusive. If multiple overlapping regions exist, a pointer to any | |
314 | * of the regions is returned. Null is returned only when no overlapping | |
315 | * region exists. | |
316 | */ | |
eabe7881 AJ |
317 | static struct userspace_mem_region * |
318 | userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) | |
783e9e51 PB |
319 | { |
320 | struct userspace_mem_region *region; | |
321 | ||
322 | for (region = vm->userspace_mem_region_head; region; | |
323 | region = region->next) { | |
324 | uint64_t existing_start = region->region.guest_phys_addr; | |
325 | uint64_t existing_end = region->region.guest_phys_addr | |
326 | + region->region.memory_size - 1; | |
327 | if (start <= existing_end && end >= existing_start) | |
328 | return region; | |
329 | } | |
330 | ||
331 | return NULL; | |
332 | } | |
333 | ||
eabe7881 AJ |
334 | /* |
335 | * KVM Userspace Memory Region Find | |
783e9e51 PB |
336 | * |
337 | * Input Args: | |
338 | * vm - Virtual Machine | |
339 | * start - Starting VM physical address | |
340 | * end - Ending VM physical address, inclusive. | |
341 | * | |
342 | * Output Args: None | |
343 | * | |
344 | * Return: | |
345 | * Pointer to overlapping region, NULL if no such region. | |
346 | * | |
347 | * Public interface to userspace_mem_region_find. Allows tests to look up | |
348 | * the memslot datastructure for a given range of guest physical memory. | |
349 | */ | |
350 | struct kvm_userspace_memory_region * | |
351 | kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, | |
352 | uint64_t end) | |
353 | { | |
354 | struct userspace_mem_region *region; | |
355 | ||
356 | region = userspace_mem_region_find(vm, start, end); | |
357 | if (!region) | |
358 | return NULL; | |
359 | ||
360 | return ®ion->region; | |
361 | } | |
362 | ||
eabe7881 AJ |
363 | /* |
364 | * VCPU Find | |
783e9e51 PB |
365 | * |
366 | * Input Args: | |
367 | * vm - Virtual Machine | |
368 | * vcpuid - VCPU ID | |
369 | * | |
370 | * Output Args: None | |
371 | * | |
372 | * Return: | |
373 | * Pointer to VCPU structure | |
374 | * | |
375 | * Locates a vcpu structure that describes the VCPU specified by vcpuid and | |
376 | * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU | |
377 | * for the specified vcpuid. | |
378 | */ | |
eabe7881 | 379 | struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid) |
783e9e51 PB |
380 | { |
381 | struct vcpu *vcpup; | |
382 | ||
383 | for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) { | |
384 | if (vcpup->id == vcpuid) | |
385 | return vcpup; | |
386 | } | |
387 | ||
388 | return NULL; | |
389 | } | |
390 | ||
eabe7881 AJ |
391 | /* |
392 | * VM VCPU Remove | |
783e9e51 PB |
393 | * |
394 | * Input Args: | |
395 | * vm - Virtual Machine | |
396 | * vcpuid - VCPU ID | |
397 | * | |
398 | * Output Args: None | |
399 | * | |
400 | * Return: None, TEST_ASSERT failures for all error conditions | |
401 | * | |
402 | * Within the VM specified by vm, removes the VCPU given by vcpuid. | |
403 | */ | |
404 | static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid) | |
405 | { | |
406 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
0a505fe6 | 407 | int ret; |
783e9e51 | 408 | |
0a505fe6 PB |
409 | ret = munmap(vcpu->state, sizeof(*vcpu->state)); |
410 | TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i " | |
411 | "errno: %i", ret, errno); | |
412 | close(vcpu->fd); | |
783e9e51 PB |
413 | TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i " |
414 | "errno: %i", ret, errno); | |
415 | ||
416 | if (vcpu->next) | |
417 | vcpu->next->prev = vcpu->prev; | |
418 | if (vcpu->prev) | |
419 | vcpu->prev->next = vcpu->next; | |
420 | else | |
421 | vm->vcpu_head = vcpu->next; | |
422 | free(vcpu); | |
423 | } | |
424 | ||
fa3899ad PB |
425 | void kvm_vm_release(struct kvm_vm *vmp) |
426 | { | |
427 | int ret; | |
428 | ||
fa3899ad PB |
429 | while (vmp->vcpu_head) |
430 | vm_vcpu_rm(vmp, vmp->vcpu_head->id); | |
431 | ||
fa3899ad PB |
432 | ret = close(vmp->fd); |
433 | TEST_ASSERT(ret == 0, "Close of vm fd failed,\n" | |
434 | " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno); | |
435 | ||
436 | close(vmp->kvm_fd); | |
437 | TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n" | |
438 | " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno); | |
439 | } | |
783e9e51 | 440 | |
/*
 * Destroys and frees the VM pointed to by vmp: deletes every memslot
 * from the kernel, unmaps its backing memory, releases the fds and
 * frees all tracking structures.  A NULL vmp is a no-op.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	int ret;

	if (vmp == NULL)
		return;

	/* Free userspace_mem_regions. */
	while (vmp->userspace_mem_region_head) {
		struct userspace_mem_region *region
			= vmp->userspace_mem_region_head;

		/* Setting memory_size to 0 deletes the slot in the kernel. */
		region->region.memory_size = 0;
		ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION,
			&region->region);
		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
			"rc: %i errno: %i", ret, errno);

		vmp->userspace_mem_region_head = region->next;
		sparsebit_free(&region->unused_phy_pages);
		ret = munmap(region->mmap_start, region->mmap_size);
		TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i",
			    ret, errno);

		free(region);
	}

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}
480 | ||
/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Input/Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal the guest virtual bytes starting at gva.  Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a match is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.  Must be redone every batch because
		 * consecutive gva pages may map to scattered hvas.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparsion to cross a page boundary
		 * on either side, since the gva->hva translation is
		 * only valid within a single guest page.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison.  If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found.  Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}
550 | ||
/*
 * VM Userspace Memory Region Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   backing_src - Storage source for this region.
 *                 NULL to use anonymous memory.
 *   guest_paddr - Starting guest physical address
 *   slot - KVM region slot
 *   npages - Number of physical pages
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates a memory area of the number of pages specified by npages
 * and maps it to the VM specified by vm, at a starting physical address
 * given by guest_paddr.  The region is created with a KVM region slot
 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM.  The
 * region is created with the flags given by flags.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;
	size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
	size_t alignment;

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		"Number of guest pages is not compatible with the host. "
		"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		"address not on a page boundary.\n"
		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
		guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		<= vm->max_gfn, "Physical range beyond maximum "
		"supported physical address,\n"
		"  guest_paddr: 0x%lx npages: 0x%lx\n"
		"  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		guest_paddr, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			"exists\n"
			"  requested guest_paddr: 0x%lx npages: 0x%lx "
			"page_size: 0x%x\n"
			"  existing guest_paddr: 0x%lx size: 0x%lx",
			guest_paddr, npages, vm->page_size,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	for (region = vm->userspace_mem_region_head; region;
	     region = region->next) {
		if (region->region.slot == slot)
			break;
	}
	if (region != NULL)
		TEST_FAIL("A mem region with the requested slot "
			"already exists.\n"
			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
			slot, guest_paddr, npages,
			region->region.slot,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = npages * vm->page_size;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	/* THP regions must be aligned to a full huge page. */
	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(huge_page_size, alignment);

	/* Add enough memory to align up if necessary */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS
				  | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
				  -1, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    "test_malloc failed, mmap_start: %p errno: %i",
		    region->mmap_start, errno);

	/* Align host address */
	region->host_mem = align(region->mmap_start, alignment);

	/* As needed perform madvise */
	if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
		ret = madvise(region->host_mem, npages * vm->page_size,
			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
		TEST_ASSERT(ret == 0, "madvise failed,\n"
			    "  addr: %p\n"
			    "  length: 0x%lx\n"
			    "  src_type: %x",
			    region->host_mem, npages * vm->page_size, src_type);
	}

	/* Initially every guest page of the region is unallocated. */
	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i\n"
		"  slot: %u flags: 0x%x\n"
		"  guest_phys_addr: 0x%lx size: 0x%lx",
		ret, errno, slot, flags,
		guest_paddr, (uint64_t) region->region.memory_size);

	/* Add to linked-list of memory regions. */
	if (vm->userspace_mem_region_head)
		vm->userspace_mem_region_head->prev = region;
	region->next = vm->userspace_mem_region_head;
	vm->userspace_mem_region_head = region;
}
694 | ||
eabe7881 AJ |
695 | /* |
696 | * Memslot to region | |
783e9e51 PB |
697 | * |
698 | * Input Args: | |
699 | * vm - Virtual Machine | |
700 | * memslot - KVM memory slot ID | |
701 | * | |
702 | * Output Args: None | |
703 | * | |
704 | * Return: | |
705 | * Pointer to memory region structure that describe memory region | |
706 | * using kvm memory slot ID given by memslot. TEST_ASSERT failure | |
707 | * on error (e.g. currently no memory region using memslot as a KVM | |
708 | * memory slot ID). | |
709 | */ | |
09444420 | 710 | struct userspace_mem_region * |
eabe7881 | 711 | memslot2region(struct kvm_vm *vm, uint32_t memslot) |
783e9e51 PB |
712 | { |
713 | struct userspace_mem_region *region; | |
714 | ||
715 | for (region = vm->userspace_mem_region_head; region; | |
716 | region = region->next) { | |
717 | if (region->region.slot == memslot) | |
718 | break; | |
719 | } | |
720 | if (region == NULL) { | |
721 | fprintf(stderr, "No mem region with the requested slot found,\n" | |
722 | " requested slot: %u\n", memslot); | |
723 | fputs("---- vm dump ----\n", stderr); | |
724 | vm_dump(stderr, vm, 2); | |
352be2c5 | 725 | TEST_FAIL("Mem region not found"); |
783e9e51 PB |
726 | } |
727 | ||
728 | return region; | |
729 | } | |
730 | ||
eabe7881 AJ |
731 | /* |
732 | * VM Memory Region Flags Set | |
783e9e51 PB |
733 | * |
734 | * Input Args: | |
735 | * vm - Virtual Machine | |
736 | * flags - Starting guest physical address | |
737 | * | |
738 | * Output Args: None | |
739 | * | |
740 | * Return: None | |
741 | * | |
742 | * Sets the flags of the memory region specified by the value of slot, | |
743 | * to the values given by flags. | |
744 | */ | |
745 | void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) | |
746 | { | |
747 | int ret; | |
748 | struct userspace_mem_region *region; | |
749 | ||
783e9e51 PB |
750 | region = memslot2region(vm, slot); |
751 | ||
752 | region->region.flags = flags; | |
753 | ||
754 | ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); | |
755 | ||
756 | TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n" | |
757 | " rc: %i errno: %i slot: %u flags: 0x%x", | |
758 | ret, errno, slot, flags); | |
759 | } | |
760 | ||
13e48aa9 SC |
761 | /* |
762 | * VM Memory Region Move | |
763 | * | |
764 | * Input Args: | |
765 | * vm - Virtual Machine | |
766 | * slot - Slot of the memory region to move | |
4b547a86 | 767 | * new_gpa - Starting guest physical address |
13e48aa9 SC |
768 | * |
769 | * Output Args: None | |
770 | * | |
771 | * Return: None | |
772 | * | |
773 | * Change the gpa of a memory region. | |
774 | */ | |
775 | void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) | |
776 | { | |
777 | struct userspace_mem_region *region; | |
778 | int ret; | |
779 | ||
780 | region = memslot2region(vm, slot); | |
781 | ||
782 | region->region.guest_phys_addr = new_gpa; | |
783 | ||
784 | ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); | |
785 | ||
786 | TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n" | |
4b547a86 | 787 | "ret: %i errno: %i slot: %u new_gpa: 0x%lx", |
13e48aa9 SC |
788 | ret, errno, slot, new_gpa); |
789 | } | |
790 | ||
eabe7881 AJ |
791 | /* |
792 | * VCPU mmap Size | |
783e9e51 PB |
793 | * |
794 | * Input Args: None | |
795 | * | |
796 | * Output Args: None | |
797 | * | |
798 | * Return: | |
799 | * Size of VCPU state | |
800 | * | |
801 | * Returns the size of the structure pointed to by the return value | |
802 | * of vcpu_state(). | |
803 | */ | |
804 | static int vcpu_mmap_sz(void) | |
805 | { | |
806 | int dev_fd, ret; | |
807 | ||
808 | dev_fd = open(KVM_DEV_PATH, O_RDONLY); | |
bcb2b94a PB |
809 | if (dev_fd < 0) |
810 | exit(KSFT_SKIP); | |
783e9e51 PB |
811 | |
812 | ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); | |
813 | TEST_ASSERT(ret >= sizeof(struct kvm_run), | |
814 | "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i", | |
815 | __func__, ret, errno); | |
816 | ||
817 | close(dev_fd); | |
818 | ||
819 | return ret; | |
820 | } | |
821 | ||
/*
 * VM VCPU Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpuid.
 * No additional VCPU setup is done.  The new vcpu's kvm_run state is
 * mmap'ed from its fd and the vcpu is pushed onto the VM's vcpu list.
 */
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	vcpu = vcpu_find(vm, vcpuid);
	if (vcpu != NULL)
		TEST_FAIL("vcpu with the specified id "
			"already exists,\n"
			"  requested vcpuid: %u\n"
			"  existing vcpuid: %u state: %p",
			vcpuid, vcpu->id, vcpu->state);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
	vcpu->id = vcpuid;
	vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
	TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
		vcpu->fd, errno);

	/* The kernel's mmap size must cover at least struct kvm_run. */
	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
		"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		vcpu_mmap_sz(), sizeof(*vcpu->state));
	vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
		"vcpu id: %u errno: %i", vcpuid, errno);

	/* Add to linked-list of VCPUs. */
	if (vm->vcpu_head)
		vm->vcpu_head->prev = vcpu;
	vcpu->next = vm->vcpu_head;
	vm->vcpu_head = vcpu;
}
871 | ||
/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or below vaddr_min, with at least
 *   sz unused bytes.  TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes.  A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 *
 * The search walks two sparsebit maps in tandem: vm->vpages_valid
 * (page indexes the guest may use at all) and vm->vpages_mapped
 * (page indexes already handed out).  A candidate range must be
 * entirely valid and entirely unmapped.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
				      vm_vaddr_t vaddr_min)
{
	/* Number of guest pages needed, rounding sz up to a page. */
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	/* Rounding up overflowed: no page index can satisfy vaddr_min. */
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
			pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index.
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
			pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
			pgidx_start, pages);
		/* sparsebit_next_*_num() returns 0 when nothing is found. */
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
			pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	/* Paranoia: re-verify the invariants the loop established. */
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages),
		"Unexpected, invalid virtual page index range,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
		pgidx_start, pages),
		"Unexpected, pages already mapped,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);

	return pgidx_start * vm->page_size;
}
957 | ||
eabe7881 AJ |
958 | /* |
959 | * VM Virtual Address Allocate | |
783e9e51 PB |
960 | * |
961 | * Input Args: | |
962 | * vm - Virtual Machine | |
963 | * sz - Size in bytes | |
964 | * vaddr_min - Minimum starting virtual address | |
965 | * data_memslot - Memory region slot for data pages | |
966 | * pgd_memslot - Memory region slot for new virtual translation tables | |
967 | * | |
968 | * Output Args: None | |
969 | * | |
970 | * Return: | |
971 | * Starting guest virtual address | |
972 | * | |
973 | * Allocates at least sz bytes within the virtual address space of the vm | |
974 | * given by vm. The allocated bytes are mapped to a virtual address >= | |
975 | * the address given by vaddr_min. Note that each allocation uses a | |
976 | * a unique set of pages, with the minimum real allocation being at least | |
977 | * a page. | |
978 | */ | |
979 | vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, | |
eabe7881 | 980 | uint32_t data_memslot, uint32_t pgd_memslot) |
783e9e51 PB |
981 | { |
982 | uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); | |
983 | ||
984 | virt_pgd_alloc(vm, pgd_memslot); | |
985 | ||
eabe7881 AJ |
986 | /* |
987 | * Find an unused range of virtual page addresses of at least | |
783e9e51 PB |
988 | * pages in length. |
989 | */ | |
990 | vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); | |
991 | ||
992 | /* Map the virtual pages. */ | |
993 | for (vm_vaddr_t vaddr = vaddr_start; pages > 0; | |
994 | pages--, vaddr += vm->page_size) { | |
995 | vm_paddr_t paddr; | |
996 | ||
81d1cca0 AJ |
997 | paddr = vm_phy_page_alloc(vm, |
998 | KVM_UTIL_MIN_PFN * vm->page_size, data_memslot); | |
783e9e51 PB |
999 | |
1000 | virt_pg_map(vm, vaddr, paddr, pgd_memslot); | |
1001 | ||
1002 | sparsebit_set(vm->vpages_mapped, | |
1003 | vaddr >> vm->page_shift); | |
1004 | } | |
1005 | ||
1006 | return vaddr_start; | |
1007 | } | |
1008 | ||
3b4cd0ff PX |
1009 | /* |
1010 | * Map a range of VM virtual address to the VM's physical address | |
1011 | * | |
1012 | * Input Args: | |
1013 | * vm - Virtual Machine | |
1014 | * vaddr - Virtuall address to map | |
1015 | * paddr - VM Physical Address | |
beca5470 | 1016 | * npages - The number of pages to map |
3b4cd0ff PX |
1017 | * pgd_memslot - Memory region slot for new virtual translation tables |
1018 | * | |
1019 | * Output Args: None | |
1020 | * | |
1021 | * Return: None | |
1022 | * | |
beca5470 AJ |
1023 | * Within the VM given by @vm, creates a virtual translation for |
1024 | * @npages starting at @vaddr to the page range starting at @paddr. | |
3b4cd0ff PX |
1025 | */ |
1026 | void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, | |
beca5470 | 1027 | unsigned int npages, uint32_t pgd_memslot) |
3b4cd0ff PX |
1028 | { |
1029 | size_t page_size = vm->page_size; | |
beca5470 | 1030 | size_t size = npages * page_size; |
3b4cd0ff PX |
1031 | |
1032 | TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow"); | |
1033 | TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); | |
1034 | ||
1035 | while (npages--) { | |
1036 | virt_pg_map(vm, vaddr, paddr, pgd_memslot); | |
1037 | vaddr += page_size; | |
1038 | paddr += page_size; | |
1039 | } | |
1040 | } | |
1041 | ||
eabe7881 AJ |
1042 | /* |
1043 | * Address VM Physical to Host Virtual | |
783e9e51 PB |
1044 | * |
1045 | * Input Args: | |
1046 | * vm - Virtual Machine | |
1047 | * gpa - VM physical address | |
1048 | * | |
1049 | * Output Args: None | |
1050 | * | |
1051 | * Return: | |
1052 | * Equivalent host virtual address | |
1053 | * | |
1054 | * Locates the memory region containing the VM physical address given | |
1055 | * by gpa, within the VM given by vm. When found, the host virtual | |
1056 | * address providing the memory to the vm physical address is returned. | |
1057 | * A TEST_ASSERT failure occurs if no region containing gpa exists. | |
1058 | */ | |
1059 | void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) | |
1060 | { | |
1061 | struct userspace_mem_region *region; | |
1062 | for (region = vm->userspace_mem_region_head; region; | |
1063 | region = region->next) { | |
1064 | if ((gpa >= region->region.guest_phys_addr) | |
1065 | && (gpa <= (region->region.guest_phys_addr | |
1066 | + region->region.memory_size - 1))) | |
1067 | return (void *) ((uintptr_t) region->host_mem | |
1068 | + (gpa - region->region.guest_phys_addr)); | |
1069 | } | |
1070 | ||
352be2c5 | 1071 | TEST_FAIL("No vm physical memory at 0x%lx", gpa); |
783e9e51 PB |
1072 | return NULL; |
1073 | } | |
1074 | ||
eabe7881 AJ |
1075 | /* |
1076 | * Address Host Virtual to VM Physical | |
783e9e51 PB |
1077 | * |
1078 | * Input Args: | |
1079 | * vm - Virtual Machine | |
1080 | * hva - Host virtual address | |
1081 | * | |
1082 | * Output Args: None | |
1083 | * | |
1084 | * Return: | |
1085 | * Equivalent VM physical address | |
1086 | * | |
1087 | * Locates the memory region containing the host virtual address given | |
1088 | * by hva, within the VM given by vm. When found, the equivalent | |
1089 | * VM physical address is returned. A TEST_ASSERT failure occurs if no | |
1090 | * region containing hva exists. | |
1091 | */ | |
1092 | vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) | |
1093 | { | |
1094 | struct userspace_mem_region *region; | |
1095 | for (region = vm->userspace_mem_region_head; region; | |
1096 | region = region->next) { | |
1097 | if ((hva >= region->host_mem) | |
1098 | && (hva <= (region->host_mem | |
1099 | + region->region.memory_size - 1))) | |
1100 | return (vm_paddr_t) ((uintptr_t) | |
1101 | region->region.guest_phys_addr | |
1102 | + (hva - (uintptr_t) region->host_mem)); | |
1103 | } | |
1104 | ||
352be2c5 | 1105 | TEST_FAIL("No mapping to a guest physical address, hva: %p", hva); |
783e9e51 PB |
1106 | return -1; |
1107 | } | |
1108 | ||
eabe7881 AJ |
1109 | /* |
1110 | * VM Create IRQ Chip | |
783e9e51 PB |
1111 | * |
1112 | * Input Args: | |
1113 | * vm - Virtual Machine | |
1114 | * | |
1115 | * Output Args: None | |
1116 | * | |
1117 | * Return: None | |
1118 | * | |
1119 | * Creates an interrupt controller chip for the VM specified by vm. | |
1120 | */ | |
1121 | void vm_create_irqchip(struct kvm_vm *vm) | |
1122 | { | |
1123 | int ret; | |
1124 | ||
1125 | ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0); | |
1126 | TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, " | |
1127 | "rc: %i errno: %i", ret, errno); | |
fa3899ad PB |
1128 | |
1129 | vm->has_irqchip = true; | |
783e9e51 PB |
1130 | } |
1131 | ||
eabe7881 AJ |
1132 | /* |
1133 | * VM VCPU State | |
783e9e51 PB |
1134 | * |
1135 | * Input Args: | |
1136 | * vm - Virtual Machine | |
1137 | * vcpuid - VCPU ID | |
1138 | * | |
1139 | * Output Args: None | |
1140 | * | |
1141 | * Return: | |
1142 | * Pointer to structure that describes the state of the VCPU. | |
1143 | * | |
1144 | * Locates and returns a pointer to a structure that describes the | |
1145 | * state of the VCPU with the given vcpuid. | |
1146 | */ | |
1147 | struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid) | |
1148 | { | |
1149 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1150 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1151 | ||
1152 | return vcpu->state; | |
1153 | } | |
1154 | ||
eabe7881 AJ |
1155 | /* |
1156 | * VM VCPU Run | |
783e9e51 PB |
1157 | * |
1158 | * Input Args: | |
1159 | * vm - Virtual Machine | |
1160 | * vcpuid - VCPU ID | |
1161 | * | |
1162 | * Output Args: None | |
1163 | * | |
1164 | * Return: None | |
1165 | * | |
1166 | * Switch to executing the code for the VCPU given by vcpuid, within the VM | |
1167 | * given by vm. | |
1168 | */ | |
1169 | void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) | |
1170 | { | |
1171 | int ret = _vcpu_run(vm, vcpuid); | |
1172 | TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, " | |
1173 | "rc: %i errno: %i", ret, errno); | |
1174 | } | |
1175 | ||
1176 | int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) | |
1177 | { | |
1178 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1179 | int rc; | |
1180 | ||
1181 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
eabe7881 | 1182 | do { |
783e9e51 PB |
1183 | rc = ioctl(vcpu->fd, KVM_RUN, NULL); |
1184 | } while (rc == -1 && errno == EINTR); | |
1185 | return rc; | |
1186 | } | |
1187 | ||
/*
 * Force the vcpu out of KVM_RUN without executing further guest code,
 * completing any in-kernel work pending on the previous exit.  Sets
 * kvm_run->immediate_exit around a single KVM_RUN call and asserts
 * that KVM_RUN returned -1/EINTR, which is the expected outcome of an
 * immediate exit.  The flag is restored to 0 before the assert so a
 * later KVM_RUN behaves normally even if the assert path is taken.
 */
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	vcpu->state->immediate_exit = 1;
	ret = ioctl(vcpu->fd, KVM_RUN, NULL);
	vcpu->state->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}
1203 | ||
449aa906 PX |
1204 | void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid, |
1205 | struct kvm_guest_debug *debug) | |
1206 | { | |
1207 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1208 | int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug); | |
1209 | ||
1210 | TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret); | |
1211 | } | |
1212 | ||
eabe7881 AJ |
1213 | /* |
1214 | * VM VCPU Set MP State | |
783e9e51 PB |
1215 | * |
1216 | * Input Args: | |
1217 | * vm - Virtual Machine | |
1218 | * vcpuid - VCPU ID | |
1219 | * mp_state - mp_state to be set | |
1220 | * | |
1221 | * Output Args: None | |
1222 | * | |
1223 | * Return: None | |
1224 | * | |
1225 | * Sets the MP state of the VCPU given by vcpuid, to the state given | |
1226 | * by mp_state. | |
1227 | */ | |
1228 | void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, | |
eabe7881 | 1229 | struct kvm_mp_state *mp_state) |
783e9e51 PB |
1230 | { |
1231 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1232 | int ret; | |
1233 | ||
1234 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1235 | ||
1236 | ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state); | |
1237 | TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, " | |
1238 | "rc: %i errno: %i", ret, errno); | |
1239 | } | |
1240 | ||
eabe7881 AJ |
1241 | /* |
1242 | * VM VCPU Regs Get | |
783e9e51 PB |
1243 | * |
1244 | * Input Args: | |
1245 | * vm - Virtual Machine | |
1246 | * vcpuid - VCPU ID | |
1247 | * | |
1248 | * Output Args: | |
1249 | * regs - current state of VCPU regs | |
1250 | * | |
1251 | * Return: None | |
1252 | * | |
1253 | * Obtains the current register state for the VCPU specified by vcpuid | |
1254 | * and stores it at the location given by regs. | |
1255 | */ | |
eabe7881 | 1256 | void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) |
783e9e51 PB |
1257 | { |
1258 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1259 | int ret; | |
1260 | ||
1261 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1262 | ||
783e9e51 PB |
1263 | ret = ioctl(vcpu->fd, KVM_GET_REGS, regs); |
1264 | TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i", | |
1265 | ret, errno); | |
1266 | } | |
1267 | ||
eabe7881 AJ |
1268 | /* |
1269 | * VM VCPU Regs Set | |
783e9e51 PB |
1270 | * |
1271 | * Input Args: | |
1272 | * vm - Virtual Machine | |
1273 | * vcpuid - VCPU ID | |
1274 | * regs - Values to set VCPU regs to | |
1275 | * | |
1276 | * Output Args: None | |
1277 | * | |
1278 | * Return: None | |
1279 | * | |
1280 | * Sets the regs of the VCPU specified by vcpuid to the values | |
1281 | * given by regs. | |
1282 | */ | |
eabe7881 | 1283 | void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) |
783e9e51 PB |
1284 | { |
1285 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1286 | int ret; | |
1287 | ||
1288 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1289 | ||
783e9e51 PB |
1290 | ret = ioctl(vcpu->fd, KVM_SET_REGS, regs); |
1291 | TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i", | |
1292 | ret, errno); | |
1293 | } | |
1294 | ||
#ifdef __KVM_HAVE_VCPU_EVENTS
/*
 * Fetch the pending exception/interrupt event state
 * (KVM_GET_VCPU_EVENTS) of the vcpu given by vcpuid into *events.
 */
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	rc = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
	TEST_ASSERT(rc == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
		rc, errno);
}

/*
 * Program the exception/interrupt event state (KVM_SET_VCPU_EVENTS)
 * of the vcpu given by vcpuid from *events.
 */
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	rc = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
	TEST_ASSERT(rc == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
		rc, errno);
}
#endif
783e9e51 | 1322 | |
c7957206 | 1323 | #ifdef __x86_64__ |
da1e3071 AL |
1324 | void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid, |
1325 | struct kvm_nested_state *state) | |
1326 | { | |
1327 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1328 | int ret; | |
1329 | ||
1330 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1331 | ||
1332 | ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state); | |
1333 | TEST_ASSERT(ret == 0, | |
1334 | "KVM_SET_NESTED_STATE failed, ret: %i errno: %i", | |
1335 | ret, errno); | |
1336 | } | |
1337 | ||
1338 | int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, | |
1339 | struct kvm_nested_state *state, bool ignore_error) | |
1340 | { | |
1341 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1342 | int ret; | |
1343 | ||
1344 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1345 | ||
1346 | ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state); | |
1347 | if (!ignore_error) { | |
1348 | TEST_ASSERT(ret == 0, | |
1349 | "KVM_SET_NESTED_STATE failed, ret: %i errno: %i", | |
1350 | ret, errno); | |
1351 | } | |
1352 | ||
1353 | return ret; | |
1354 | } | |
c7957206 | 1355 | #endif |
da1e3071 | 1356 | |
eabe7881 AJ |
1357 | /* |
1358 | * VM VCPU System Regs Get | |
783e9e51 PB |
1359 | * |
1360 | * Input Args: | |
1361 | * vm - Virtual Machine | |
1362 | * vcpuid - VCPU ID | |
1363 | * | |
1364 | * Output Args: | |
1365 | * sregs - current state of VCPU system regs | |
1366 | * | |
1367 | * Return: None | |
1368 | * | |
1369 | * Obtains the current system register state for the VCPU specified by | |
1370 | * vcpuid and stores it at the location given by sregs. | |
1371 | */ | |
eabe7881 | 1372 | void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) |
783e9e51 PB |
1373 | { |
1374 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1375 | int ret; | |
1376 | ||
1377 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1378 | ||
783e9e51 PB |
1379 | ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs); |
1380 | TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i", | |
1381 | ret, errno); | |
1382 | } | |
1383 | ||
eabe7881 AJ |
1384 | /* |
1385 | * VM VCPU System Regs Set | |
783e9e51 PB |
1386 | * |
1387 | * Input Args: | |
1388 | * vm - Virtual Machine | |
1389 | * vcpuid - VCPU ID | |
1390 | * sregs - Values to set VCPU system regs to | |
1391 | * | |
1392 | * Output Args: None | |
1393 | * | |
1394 | * Return: None | |
1395 | * | |
1396 | * Sets the system regs of the VCPU specified by vcpuid to the values | |
1397 | * given by sregs. | |
1398 | */ | |
eabe7881 | 1399 | void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) |
783e9e51 PB |
1400 | { |
1401 | int ret = _vcpu_sregs_set(vm, vcpuid, sregs); | |
1402 | TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, " | |
1403 | "rc: %i errno: %i", ret, errno); | |
1404 | } | |
1405 | ||
eabe7881 | 1406 | int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) |
783e9e51 PB |
1407 | { |
1408 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
783e9e51 PB |
1409 | |
1410 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1411 | ||
783e9e51 PB |
1412 | return ioctl(vcpu->fd, KVM_SET_SREGS, sregs); |
1413 | } | |
1414 | ||
ada0a50d JF |
1415 | void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu) |
1416 | { | |
1417 | int ret; | |
1418 | ||
1419 | ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu); | |
1420 | TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)", | |
1421 | ret, errno, strerror(errno)); | |
1422 | } | |
1423 | ||
1424 | void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu) | |
1425 | { | |
1426 | int ret; | |
1427 | ||
1428 | ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu); | |
1429 | TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)", | |
1430 | ret, errno, strerror(errno)); | |
1431 | } | |
1432 | ||
1433 | void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg) | |
1434 | { | |
1435 | int ret; | |
1436 | ||
1437 | ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg); | |
1438 | TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)", | |
1439 | ret, errno, strerror(errno)); | |
1440 | } | |
1441 | ||
1442 | void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg) | |
1443 | { | |
1444 | int ret; | |
1445 | ||
1446 | ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg); | |
1447 | TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)", | |
1448 | ret, errno, strerror(errno)); | |
1449 | } | |
1450 | ||
eabe7881 AJ |
1451 | /* |
1452 | * VCPU Ioctl | |
783e9e51 PB |
1453 | * |
1454 | * Input Args: | |
1455 | * vm - Virtual Machine | |
1456 | * vcpuid - VCPU ID | |
1457 | * cmd - Ioctl number | |
1458 | * arg - Argument to pass to the ioctl | |
1459 | * | |
1460 | * Return: None | |
1461 | * | |
1462 | * Issues an arbitrary ioctl on a VCPU fd. | |
1463 | */ | |
eabe7881 AJ |
1464 | void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, |
1465 | unsigned long cmd, void *arg) | |
7e50c424 VK |
1466 | { |
1467 | int ret; | |
1468 | ||
1469 | ret = _vcpu_ioctl(vm, vcpuid, cmd, arg); | |
1470 | TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)", | |
1471 | cmd, ret, errno, strerror(errno)); | |
1472 | } | |
1473 | ||
1474 | int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, | |
1475 | unsigned long cmd, void *arg) | |
783e9e51 PB |
1476 | { |
1477 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1478 | int ret; | |
1479 | ||
1480 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1481 | ||
1482 | ret = ioctl(vcpu->fd, cmd, arg); | |
7e50c424 VK |
1483 | |
1484 | return ret; | |
783e9e51 PB |
1485 | } |
1486 | ||
eabe7881 AJ |
1487 | /* |
1488 | * VM Ioctl | |
783e9e51 PB |
1489 | * |
1490 | * Input Args: | |
1491 | * vm - Virtual Machine | |
1492 | * cmd - Ioctl number | |
1493 | * arg - Argument to pass to the ioctl | |
1494 | * | |
1495 | * Return: None | |
1496 | * | |
1497 | * Issues an arbitrary ioctl on a VM fd. | |
1498 | */ | |
1499 | void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) | |
1500 | { | |
1501 | int ret; | |
1502 | ||
1503 | ret = ioctl(vm->fd, cmd, arg); | |
1504 | TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)", | |
1505 | cmd, ret, errno, strerror(errno)); | |
1506 | } | |
1507 | ||
/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream: mode/fd/page_size, every userspace memory region
 * with its unused-physical-page bitmap, the mapped-virtual-page
 * bitmap, the page tables (if created), and each vcpu's state.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct userspace_mem_region *region;
	struct vcpu *vcpu;

	/* "%*s" with an empty string prints `indent` spaces of margin. */
	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	for (region = vm->userspace_mem_region_head; region;
		region = region->next) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");
	for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
		vcpu_dump(stream, vm, vcpu->id, indent + 2);
}
1555 | ||
/*
 * Known KVM exit reasons: maps KVM_EXIT_* codes to printable names for
 * exit_reason_str().  KVM_EXIT_MEMORY_NOT_PRESENT is only present on
 * kernels whose headers define it, hence the #ifdef.
 */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};
1585 | ||
eabe7881 AJ |
1586 | /* |
1587 | * Exit Reason String | |
783e9e51 PB |
1588 | * |
1589 | * Input Args: | |
1590 | * exit_reason - Exit reason | |
1591 | * | |
1592 | * Output Args: None | |
1593 | * | |
1594 | * Return: | |
1595 | * Constant string pointer describing the exit reason. | |
1596 | * | |
1597 | * Locates and returns a constant string that describes the KVM exit | |
1598 | * reason given by exit_reason. If no such string is found, a constant | |
1599 | * string of "Unknown" is returned. | |
1600 | */ | |
1601 | const char *exit_reason_str(unsigned int exit_reason) | |
1602 | { | |
1603 | unsigned int n1; | |
1604 | ||
1605 | for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) { | |
1606 | if (exit_reason == exit_reasons_known[n1].reason) | |
1607 | return exit_reasons_known[n1].name; | |
1608 | } | |
1609 | ||
1610 | return "Unknown"; | |
1611 | } | |
1612 | ||
/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min.  If found, the pages are marked as in use
 * and their base address is returned.  A TEST_ASSERT failure occurs if
 * not enough pages are available at or above paddr_min.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		"  paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	/*
	 * Scan for `num` consecutive free pages: walk forward from base;
	 * on hitting an allocated page, restart the window at the next
	 * free page (sparsebit_next_set returns 0 when none remain).
	 */
	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	/* pg == 0 means the scan ran off the end of the free map. */
	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	/* Claim the range by clearing its bits in the free map. */
	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}
783e9e51 | 1671 | |
d5106539 AJ |
1672 | vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, |
1673 | uint32_t memslot) | |
1674 | { | |
1675 | return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); | |
783e9e51 PB |
1676 | } |
1677 | ||
eabe7881 AJ |
1678 | /* |
1679 | * Address Guest Virtual to Host Virtual | |
783e9e51 PB |
1680 | * |
1681 | * Input Args: | |
1682 | * vm - Virtual Machine | |
1683 | * gva - VM virtual address | |
1684 | * | |
1685 | * Output Args: None | |
1686 | * | |
1687 | * Return: | |
1688 | * Equivalent host virtual address | |
1689 | */ | |
1690 | void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) | |
1691 | { | |
1692 | return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); | |
1693 | } | |
9dba988e AL |
1694 | |
1695 | /* | |
1696 | * Is Unrestricted Guest | |
1697 | * | |
1698 | * Input Args: | |
1699 | * vm - Virtual Machine | |
1700 | * | |
1701 | * Output Args: None | |
1702 | * | |
1703 | * Return: True if the unrestricted guest is set to 'Y', otherwise return false. | |
1704 | * | |
1705 | * Check if the unrestricted guest flag is enabled. | |
1706 | */ | |
1707 | bool vm_is_unrestricted_guest(struct kvm_vm *vm) | |
1708 | { | |
1709 | char val = 'N'; | |
1710 | size_t count; | |
1711 | FILE *f; | |
1712 | ||
1713 | if (vm == NULL) { | |
1714 | /* Ensure that the KVM vendor-specific module is loaded. */ | |
1715 | f = fopen(KVM_DEV_PATH, "r"); | |
1716 | TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d", | |
1717 | errno); | |
1718 | fclose(f); | |
1719 | } | |
1720 | ||
1721 | f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r"); | |
1722 | if (f) { | |
1723 | count = fread(&val, sizeof(char), 1, f); | |
1724 | TEST_ASSERT(count == 1, "Unable to read from param file."); | |
1725 | fclose(f); | |
1726 | } | |
1727 | ||
1728 | return val == 'Y'; | |
1729 | } | |
52200d0d PX |
1730 | |
1731 | unsigned int vm_get_page_size(struct kvm_vm *vm) | |
1732 | { | |
1733 | return vm->page_size; | |
1734 | } | |
1735 | ||
1736 | unsigned int vm_get_page_shift(struct kvm_vm *vm) | |
1737 | { | |
1738 | return vm->page_shift; | |
1739 | } | |
1740 | ||
1741 | unsigned int vm_get_max_gfn(struct kvm_vm *vm) | |
1742 | { | |
1743 | return vm->max_gfn; | |
1744 | } | |
87a802d9 AJ |
1745 | |
/*
 * Convert a page count between two page sizes given by their shifts.
 *
 * Input Args:
 *   num_pages - page count in units of (1 << page_shift) pages
 *   page_shift - log2 of the source page size
 *   new_page_shift - log2 of the destination page size
 *   ceil - when converting to larger pages, round up instead of down
 *
 * Return:
 *   num_pages expressed in units of (1 << new_page_shift) pages.
 */
static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n;

	/* Converting to equal or smaller pages: exact multiply. */
	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/*
	 * Bug fix: n was previously computed before the guard above,
	 * so when page_shift > new_page_shift the unsigned subtraction
	 * wrapped and 1 << (huge value) was evaluated -- a shift count
	 * >= the width of int, which is undefined behavior (C11 6.5.7).
	 */
	n = 1 << (new_page_shift - page_shift);
	return num_pages / n + !!(ceil && num_pages % n);
}
1758 | ||
/* log2 of the host page size (page size is always a power of two). */
static inline int getpageshift(void)
{
	int pgsz = getpagesize();

	/* __builtin_ffs returns the 1-based index of the lowest set bit. */
	return __builtin_ffs(pgsz) - 1;
}
1763 | ||
1764 | unsigned int | |
1765 | vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) | |
1766 | { | |
1767 | return vm_calc_num_pages(num_guest_pages, | |
1768 | vm_guest_mode_params[mode].page_shift, | |
1769 | getpageshift(), true); | |
1770 | } | |
1771 | ||
1772 | unsigned int | |
1773 | vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages) | |
1774 | { | |
1775 | return vm_calc_num_pages(num_host_pages, getpageshift(), | |
1776 | vm_guest_mode_params[mode].page_shift, false); | |
1777 | } | |
94c4b76b AJ |
1778 | |
1779 | unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size) | |
1780 | { | |
1781 | unsigned int n; | |
1782 | n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size); | |
1783 | return vm_adjust_num_guest_pages(mode, n); | |
1784 | } |