// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test for s390x CMMA migration
 *
 * Copyright IBM Corp. 2023
 *
 * Authors:
 *  Nico Boehr <nrb@linux.ibm.com>
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"

#define MAIN_PAGE_COUNT 512

#define TEST_DATA_PAGE_COUNT 512
#define TEST_DATA_MEMSLOT 1
#define TEST_DATA_START_GFN 4096

#define TEST_DATA_TWO_PAGE_COUNT 256
#define TEST_DATA_TWO_MEMSLOT 2
#define TEST_DATA_TWO_START_GFN 8192

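/* One CMMA state byte per page, covering the MAIN and TEST_DATA memslots. */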
static char cmma_value_buf[MAIN_PAGE_COUNT + TEST_DATA_PAGE_COUNT];

/**
 * Dirty the CMMA attributes of exactly one page in the TEST_DATA memslot,
 * so that use_cmma is enabled and the CMMA-related ioctls do something.
 */
static void guest_do_one_essa(void)
{
	asm volatile(
		/* load TEST_DATA_START_GFN into r1 */
		"	llilf	1,%[start_gfn]\n"
		/* calculate the address from the gfn */
		"	sllg	1,1,12(0)\n"
		/* set the first page in TEST_DATA memslot to STABLE */
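		/*
		 * The .insn line below hand-encodes ESSA (opcode 0xb9ab,
		 * RRF format): r2 receives the previous page state, r1
		 * holds the page address and the third operand is the
		 * ESSA operation code (1 == set stable).
		 */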
45 " .insn rrf,0xb9ab0000,2,1,1,0\n"
46 /* hypercall */
47 " diag 0,0,0x501\n"
48 "0: j 0b"
49 :
50 : [start_gfn] "L"(TEST_DATA_START_GFN)
51 : "r1", "r2", "memory", "cc"
52 );
53}

/**
 * Touch the CMMA attributes of all pages in the TEST_DATA memslot,
 * setting them to the stable state.
 */
static void guest_dirty_test_data(void)
{
	asm volatile(
		/* r1 = TEST_DATA_START_GFN */
		"	xgr	1,1\n"
		"	llilf	1,%[start_gfn]\n"
		/* r5 = TEST_DATA_PAGE_COUNT */
		"	lghi	5,%[page_count]\n"
		/* r5 += r1 */
		"2:	agfr	5,1\n"
		/* r2 = r1 << 12 */
		"1:	sllg	2,1,12(0)\n"
		/* essa(r4, r2, SET_STABLE) */
		"	.insn	rrf,0xb9ab0000,4,2,1,0\n"
		/* i++ */
		"	agfi	1,1\n"
		/* if r1 < r5 goto 1 */
		"	cgrjl	1,5,1b\n"
		/* hypercall */
		"	diag	0,0,0x501\n"
		"0:	j	0b"
		:
		: [start_gfn] "L"(TEST_DATA_START_GFN),
		  [page_count] "L"(TEST_DATA_PAGE_COUNT)
		:
		/* the counter in our loop over the pages */
		"r1",
		/* the calculated page physical address */
		"r2",
		/* ESSA output register */
		"r4",
		/* last page */
		"r5",
		"cc", "memory"
	);
}

static void create_main_memslot(struct kvm_vm *vm)
{
	int i;

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0);
	/* set the array of memslots to zero like __vm_create does */
	for (i = 0; i < NR_MEM_REGIONS; i++)
		vm->memslots[i] = 0;
}

static void create_test_memslot(struct kvm_vm *vm)
{
	vm_userspace_mem_region_add(vm,
				    VM_MEM_SRC_ANONYMOUS,
				    TEST_DATA_START_GFN << vm->page_shift,
				    TEST_DATA_MEMSLOT,
				    TEST_DATA_PAGE_COUNT,
				    0
				   );
	vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
}

static void create_memslots(struct kvm_vm *vm)
{
	/*
	 * Our VM has the following memory layout:
	 * +------+---------------------------+
	 * | GFN  | Memslot                   |
	 * +------+---------------------------+
	 * | 0    |                           |
	 * | ...  | MAIN (Code, Stack, ...)   |
	 * | 511  |                           |
	 * +------+---------------------------+
	 * | 4096 |                           |
	 * | ...  | TEST_DATA                 |
	 * | 4607 |                           |
	 * +------+---------------------------+
	 */
	create_main_memslot(vm);
	create_test_memslot(vm);
}

static void finish_vm_setup(struct kvm_vm *vm)
{
	struct userspace_mem_region *slot0;

	kvm_vm_elf_load(vm, program_invocation_name);

	slot0 = memslot2region(vm, 0);
	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);

	kvm_arch_vm_post_create(vm);
}

static struct kvm_vm *create_vm_two_memslots(void)
{
	struct kvm_vm *vm;

	vm = vm_create_barebones();

	create_memslots(vm);

	finish_vm_setup(vm);

	return vm;
}

static void enable_cmma(struct kvm_vm *vm)
{
	int r;

	r = __kvm_device_attr_set(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA, NULL);
	TEST_ASSERT(!r, "enabling cmma failed r=%d errno=%d", r, errno);
}

static void enable_dirty_tracking(struct kvm_vm *vm)
{
	vm_mem_region_set_flags(vm, 0, KVM_MEM_LOG_DIRTY_PAGES);
	vm_mem_region_set_flags(vm, TEST_DATA_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
}

static int __enable_migration_mode(struct kvm_vm *vm)
{
	return __kvm_device_attr_set(vm->fd,
				     KVM_S390_VM_MIGRATION,
				     KVM_S390_VM_MIGRATION_START,
				     NULL
				    );
}

static void enable_migration_mode(struct kvm_vm *vm)
{
	int r = __enable_migration_mode(vm);

	TEST_ASSERT(!r, "enabling migration mode failed r=%d errno=%d", r, errno);
}

static bool is_migration_mode_on(struct kvm_vm *vm)
{
	u64 out;
	int r;

	r = __kvm_device_attr_get(vm->fd,
				  KVM_S390_VM_MIGRATION,
				  KVM_S390_VM_MIGRATION_STATUS,
				  &out
				 );
	TEST_ASSERT(!r, "getting migration mode status failed r=%d errno=%d", r, errno);
	return out;
}

static int vm_get_cmma_bits(struct kvm_vm *vm, u64 flags, int *errno_out)
{
	struct kvm_s390_cmma_log args;
	int rc;

	errno = 0;

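	/*
	 * For KVM_S390_GET_CMMA_BITS, count is the number of pages to query
	 * and values points to one CMMA state byte per page. On return,
	 * count holds the number of values written, start_gfn the first
	 * reported GFN and remaining the number of dirty pages left over.
	 */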
	args = (struct kvm_s390_cmma_log){
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = flags,
		.values = (__u64)&cmma_value_buf[0]
	};
	rc = __vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);

	*errno_out = errno;
	return rc;
}

static void test_get_cmma_basic(void)
{
	struct kvm_vm *vm = create_vm_two_memslots();
	struct kvm_vcpu *vcpu;
	int rc, errno_out;

	/* GET_CMMA_BITS without CMMA enabled should fail */
	rc = vm_get_cmma_bits(vm, 0, &errno_out);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno_out, ENXIO);

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);

	vcpu_run(vcpu);

	/* GET_CMMA_BITS without migration mode and without peeking should fail */
	rc = vm_get_cmma_bits(vm, 0, &errno_out);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno_out, EINVAL);

	/* GET_CMMA_BITS without migration mode and with peeking should work */
	rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out);
	TEST_ASSERT_EQ(rc, 0);
	TEST_ASSERT_EQ(errno_out, 0);

	enable_dirty_tracking(vm);
	enable_migration_mode(vm);

	/* GET_CMMA_BITS with invalid flags */
	rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno_out, EINVAL);

	kvm_vm_free(vm);
}

static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
{
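	/*
	 * A diag 0x501 hypercall shows up as an instruction intercept:
	 * exit_reason 13 is KVM_EXIT_S390_SIEIC, icptcode 4 is an
	 * instruction interception and ipa/ipb encode "diag 0,0,0x501".
	 */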
	TEST_ASSERT_EQ(vcpu->run->exit_reason, 13);
	TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
	TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
	TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
}

static void test_migration_mode(void)
{
	struct kvm_vm *vm = vm_create_barebones();
	struct kvm_vcpu *vcpu;
	u64 orig_psw;
	int rc;

	/* enabling migration mode on a VM without memory should fail */
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno, EINVAL);
	TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
	errno = 0;

	create_memslots(vm);
	finish_vm_setup(vm);

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
	orig_psw = vcpu->run->psw_addr;

	/*
	 * Execute one essa instruction in the guest. Otherwise the guest will
	 * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
	 */
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	/* migration mode when memslots have dirty tracking off should fail */
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno, EINVAL);
	TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
	errno = 0;

	/* enable dirty tracking */
	enable_dirty_tracking(vm);

	/* enabling migration mode should work now */
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, 0);
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	errno = 0;

	/* execute another ESSA instruction to verify it still works */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	/*
	 * With migration mode on, create a new memslot with dirty tracking off.
	 * This should turn off migration mode.
	 */
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	vm_userspace_mem_region_add(vm,
				    VM_MEM_SRC_ANONYMOUS,
				    TEST_DATA_TWO_START_GFN << vm->page_shift,
				    TEST_DATA_TWO_MEMSLOT,
				    TEST_DATA_TWO_PAGE_COUNT,
				    0
				   );
	TEST_ASSERT(!is_migration_mode_on(vm),
		    "creating memslot without dirty tracking turns off migration mode"
		   );

	/* ESSA instructions should still execute fine */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	/*
	 * Turn on dirty tracking on the new memslot.
	 * It should be possible to turn migration mode back on again.
	 */
	vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, 0);
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	errno = 0;

	/*
	 * Turn off dirty tracking again, this time with just a flag change.
	 * Again, migration mode should turn off.
	 */
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, 0);
	TEST_ASSERT(!is_migration_mode_on(vm),
		    "disabling dirty tracking should turn off migration mode"
		   );

	/* ESSA instructions should still execute fine */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	kvm_vm_free(vm);
}

/**
 * Given a VM with the MAIN and TEST_DATA memslots, assert that the CMMA
 * attributes of all pages in both memslots are dirty and that nothing else
 * is. This has the useful side effect of ensuring nothing is CMMA dirty
 * after this function.
 */
static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
{
	struct kvm_s390_cmma_log args;

	/*
	 * First iteration - everything should be dirty.
	 * Start at the main memslot...
	 */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	TEST_ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
	TEST_ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
	TEST_ASSERT_EQ(args.start_gfn, 0);

	/* ...and then - after a hole - the TEST_DATA memslot should follow */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = MAIN_PAGE_COUNT,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	TEST_ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
	TEST_ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
	TEST_ASSERT_EQ(args.remaining, 0);

	/* ...and nothing else should be there */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	TEST_ASSERT_EQ(args.count, 0);
	TEST_ASSERT_EQ(args.start_gfn, 0);
	TEST_ASSERT_EQ(args.remaining, 0);
}

/**
 * Given a VM, assert no pages are CMMA dirty.
 */
static void assert_no_pages_cmma_dirty(struct kvm_vm *vm)
{
	struct kvm_s390_cmma_log args;

	/* If we start from GFN 0 again, nothing should be dirty. */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	if (args.count || args.remaining || args.start_gfn)
		TEST_FAIL("pages are still dirty start_gfn=0x%llx count=%u remaining=%llu",
			  args.start_gfn,
			  args.count,
			  args.remaining
			 );
}

static void test_get_initial_dirty(void)
{
	struct kvm_vm *vm = create_vm_two_memslots();
	struct kvm_vcpu *vcpu;

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);

	/*
	 * Execute one essa instruction in the guest. Otherwise the guest will
	 * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
	 */
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	enable_dirty_tracking(vm);
	enable_migration_mode(vm);

	assert_all_slots_cmma_dirty(vm);

	/* Start from the beginning again and make sure nothing else is dirty */
	assert_no_pages_cmma_dirty(vm);

	kvm_vm_free(vm);
}

static void query_cmma_range(struct kvm_vm *vm,
			     u64 start_gfn, u64 gfn_count,
			     struct kvm_s390_cmma_log *res_out)
{
	*res_out = (struct kvm_s390_cmma_log){
		.start_gfn = start_gfn,
		.count = gfn_count,
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, res_out);
}

/**
 * Assert that the given cmma_log struct, as filled in by query_cmma_range(),
 * indicates that the first dirty gfn is first_dirty_gfn and that it contains
 * exactly dirty_gfn_count CMMA values.
 */
static void assert_cmma_dirty(u64 first_dirty_gfn,
			      u64 dirty_gfn_count,
			      const struct kvm_s390_cmma_log *res)
{
	TEST_ASSERT_EQ(res->start_gfn, first_dirty_gfn);
	TEST_ASSERT_EQ(res->count, dirty_gfn_count);
	for (size_t i = 0; i < dirty_gfn_count; i++)
		TEST_ASSERT_EQ(cmma_value_buf[i], 0x0);	/* stable state */
	TEST_ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff);	/* not touched */
}

static void test_get_skip_holes(void)
{
	size_t gfn_offset;
	struct kvm_vm *vm = create_vm_two_memslots();
	struct kvm_s390_cmma_log log;
	struct kvm_vcpu *vcpu;
	u64 orig_psw;

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_dirty_test_data);

	orig_psw = vcpu->run->psw_addr;

	/*
	 * Execute some essa instructions in the guest. Otherwise the guest will
	 * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
	 */
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	enable_dirty_tracking(vm);
	enable_migration_mode(vm);

	/* un-dirty all pages */
	assert_all_slots_cmma_dirty(vm);

	/* Then, dirty just the TEST_DATA memslot */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);

	gfn_offset = TEST_DATA_START_GFN;
	/**
	 * Query CMMA attributes of one page, starting at page 0. Since the
	 * main memslot was not touched by the VM, this should yield the first
	 * page of the TEST_DATA memslot.
	 * The dirty bitmap should now look like this:
	 * 0: not dirty
	 * [0x1, 0x200): dirty
	 */
	query_cmma_range(vm, 0, 1, &log);
	assert_cmma_dirty(gfn_offset, 1, &log);
	gfn_offset++;

	/**
	 * Query CMMA attributes of 32 (0x20) pages past the end of the TEST_DATA
	 * memslot. This should wrap back to the beginning of the TEST_DATA
	 * memslot, page 1.
	 * The dirty bitmap should now look like this:
	 * [0, 0x21): not dirty
	 * [0x21, 0x200): dirty
	 */
	query_cmma_range(vm, TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT, 0x20, &log);
	assert_cmma_dirty(gfn_offset, 0x20, &log);
	gfn_offset += 0x20;

	/* Skip 32 pages */
	gfn_offset += 0x20;

	/**
	 * After skipping 32 pages, query the next 32 (0x20) pages.
	 * The dirty bitmap should now look like this:
	 * [0, 0x21): not dirty
	 * [0x21, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	query_cmma_range(vm, gfn_offset, 0x20, &log);
	assert_cmma_dirty(gfn_offset, 0x20, &log);
	gfn_offset += 0x20;

	/**
	 * Query 1 page from the beginning of the TEST_DATA memslot. This should
	 * yield page 0x21.
	 * The dirty bitmap should now look like this:
	 * [0, 0x22): not dirty
	 * [0x22, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	query_cmma_range(vm, TEST_DATA_START_GFN, 1, &log);
	assert_cmma_dirty(TEST_DATA_START_GFN + 0x21, 1, &log);
	gfn_offset++;

	/**
	 * Query 15 (0xF) pages from page 0x23 in TEST_DATA memslot.
	 * This should yield pages [0x23, 0x33).
	 * The dirty bitmap should now look like this:
	 * [0, 0x22): not dirty
	 * 0x22: dirty
	 * [0x23, 0x33): not dirty
	 * [0x33, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x23;
	query_cmma_range(vm, gfn_offset, 15, &log);
	assert_cmma_dirty(gfn_offset, 15, &log);

	/**
	 * Query 17 (0x11) pages from page 0x22 in TEST_DATA memslot.
	 * This should yield pages [0x22, 0x33).
	 * The dirty bitmap should now look like this:
	 * [0, 0x33): not dirty
	 * [0x33, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x22;
	query_cmma_range(vm, gfn_offset, 17, &log);
	assert_cmma_dirty(gfn_offset, 17, &log);

	/**
	 * Query 25 (0x19) pages from page 0x40 in TEST_DATA memslot.
	 * This should yield page 0x40 and nothing more, since there are more
	 * than 16 non-dirty pages after page 0x40.
	 * The dirty bitmap should now look like this:
	 * [0, 0x33): not dirty
	 * [0x33, 0x40): dirty
	 * [0x40, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x40;
	query_cmma_range(vm, gfn_offset, 25, &log);
	assert_cmma_dirty(gfn_offset, 1, &log);

	/**
	 * Query pages [0x33, 0x40).
	 * The dirty bitmap should now look like this:
	 * [0, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x33;
	query_cmma_range(vm, gfn_offset, 0x40 - 0x33, &log);
	assert_cmma_dirty(gfn_offset, 0x40 - 0x33, &log);

	/**
	 * Query the remaining pages [0x61, 0x200).
	 */
	gfn_offset = TEST_DATA_START_GFN;
	query_cmma_range(vm, gfn_offset, TEST_DATA_PAGE_COUNT - 0x61, &log);
	assert_cmma_dirty(TEST_DATA_START_GFN + 0x61, TEST_DATA_PAGE_COUNT - 0x61, &log);

	assert_no_pages_cmma_dirty(vm);
}

struct testdef {
	const char *name;
	void (*test)(void);
} testlist[] = {
651 { "migration mode and dirty tracking", test_migration_mode },
652 { "GET_CMMA_BITS: basic calls", test_get_cmma_basic },
653 { "GET_CMMA_BITS: all pages are dirty initally", test_get_inital_dirty },
654 { "GET_CMMA_BITS: holes are skipped", test_get_skip_holes },
655};

/**
 * The kernel may support CMMA, but the machine may not (i.e. if running as
 * guest-3).
 *
 * In this case, the CMMA capabilities are all there, but the CMMA-related
 * ioctls fail. To find out whether the machine supports CMMA, create a
 * temporary VM and then query the CMMA feature of the VM.
 */
static int machine_has_cmma(void)
{
	struct kvm_vm *vm = vm_create_barebones();
	int r;

	r = !__kvm_has_device_attr(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA);
	kvm_vm_free(vm);

	return r;
}

int main(int argc, char *argv[])
{
	int idx;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_CMMA_MIGRATION));
	TEST_REQUIRE(machine_has_cmma());

	ksft_print_header();

	ksft_set_plan(ARRAY_SIZE(testlist));

	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		testlist[idx].test();
		ksft_test_result_pass("%s\n", testlist[idx].name);
	}

	ksft_finished();	/* Print results and exit() accordingly */
}