// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
 *
 * Usage: to be run via nx_huge_pages_test.sh, which does the necessary
 * environment setup and teardown
 *
 * Copyright (C) 2022, Google LLC.
 */

#define _GNU_SOURCE

#include <fcntl.h>
#include <stdint.h>
#include <time.h>

#include <test_util.h>
#include "kvm_util.h"
#include "processor.h"

#define HPAGE_SLOT 10
#define HPAGE_GPA (4UL << 30) /* 4G prevents collision w/ slot 0 */
#define HPAGE_GVA HPAGE_GPA /* GVA is arbitrary, so use GPA. */
#define PAGES_PER_2MB_HUGE_PAGE 512
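/* The slot is backed by three 2MiB huge pages: hpage_1/2/3 in guest_code(). */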
#define HPAGE_SLOT_NPAGES (3 * PAGES_PER_2MB_HUGE_PAGE)

/*
 * Passed by nx_huge_pages_test.sh to provide an easy warning if this test is
 * being run without it.
 */
#define MAGIC_TOKEN 887563923

/*
 * x86 opcode for the return instruction. Used to call into, and then
 * immediately return from, memory backed with hugepages.
 */
#define RETURN_OPCODE 0xC3

/* Call the specified memory address. */
static void guest_do_CALL(uint64_t target)
{
	((void (*)(void)) target)();
}

/*
 * Exit the VM after each memory access so that the userspace component of the
 * test can make assertions about the pages backing the VM.
 *
 * See the comments below for an explanation of how each access should affect
 * the backing mappings.
 */
void guest_code(void)
{
	uint64_t hpage_1 = HPAGE_GVA;
	uint64_t hpage_2 = hpage_1 + (PAGE_SIZE * 512);
	uint64_t hpage_3 = hpage_2 + (PAGE_SIZE * 512);

	READ_ONCE(*(uint64_t *)hpage_1);
	GUEST_SYNC(1);

	READ_ONCE(*(uint64_t *)hpage_2);
	GUEST_SYNC(2);

	guest_do_CALL(hpage_1);
	GUEST_SYNC(3);

	guest_do_CALL(hpage_3);
	GUEST_SYNC(4);

	READ_ONCE(*(uint64_t *)hpage_1);
	GUEST_SYNC(5);

	READ_ONCE(*(uint64_t *)hpage_3);
	GUEST_SYNC(6);
}

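/*
 * Assert on the VM-wide "pages_2m" binary stat, i.e. the number of 2MiB
 * mappings KVM currently has installed for the guest.
 */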
static void check_2m_page_count(struct kvm_vm *vm, int expected_pages_2m)
{
	int actual_pages_2m;

	actual_pages_2m = vm_get_stat(vm, "pages_2m");

	TEST_ASSERT(actual_pages_2m == expected_pages_2m,
		    "Unexpected 2m page count. Expected %d, got %d",
		    expected_pages_2m, actual_pages_2m);
}

static void check_split_count(struct kvm_vm *vm, int expected_splits)
{
	int actual_splits;

	actual_splits = vm_get_stat(vm, "nx_lpage_splits");

	TEST_ASSERT(actual_splits == expected_splits,
		    "Unexpected NX huge page split count. Expected %d, got %d",
		    expected_splits, actual_splits);
}

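/*
 * Sleep for several reclaim periods so that the NX huge page recovery worker
 * is all but guaranteed to have run at least once before returning.
 */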
static void wait_for_reclaim(int reclaim_period_ms)
{
	long reclaim_wait_ms;
	struct timespec ts;

	reclaim_wait_ms = reclaim_period_ms * 5;
	ts.tv_sec = reclaim_wait_ms / 1000;
	ts.tv_nsec = (reclaim_wait_ms - (ts.tv_sec * 1000)) * 1000000;
	nanosleep(&ts, NULL);
}

void run_test(int reclaim_period_ms, bool disable_nx_huge_pages,
	      bool reboot_permissions)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t nr_bytes;
	void *hva;
	int r;

	vm = vm_create(1);

	if (disable_nx_huge_pages) {
		r = __vm_disable_nx_huge_pages(vm);
		if (reboot_permissions) {
			TEST_ASSERT(!r, "Disabling NX huge pages should succeed if process has reboot permissions");
		} else {
			TEST_ASSERT(r == -1 && errno == EPERM,
				    "This process should not have permission to disable NX huge pages");
			return;
		}
	}

	vcpu = vm_vcpu_add(vm, 0, guest_code);

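	/*
	 * Back the test memslot with anonymous hugetlb memory so that the
	 * host provides 2MiB pages and KVM can create 2MiB mappings.
	 */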
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
				    HPAGE_GPA, HPAGE_SLOT,
				    HPAGE_SLOT_NPAGES, 0);

	nr_bytes = HPAGE_SLOT_NPAGES * vm->page_size;

	/*
	 * Ensure that KVM can map HPAGE_SLOT with huge pages by mapping the
	 * region into the guest with 2MiB pages whenever TDP is disabled (i.e.
	 * whenever KVM is shadowing the guest page tables).
	 *
	 * When TDP is enabled, KVM should be able to map HPAGE_SLOT with huge
	 * pages irrespective of the guest page size, so map with 4KiB pages
	 * to test that that is the case.
	 */
	if (kvm_is_tdp_enabled())
		virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_4K);
	else
		virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_2M);

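	/*
	 * Fill the entire backing region with return instructions (see
	 * RETURN_OPCODE) so that any guest call into it returns immediately.
	 */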
	hva = addr_gpa2hva(vm, HPAGE_GPA);
	memset(hva, RETURN_OPCODE, nr_bytes);

	check_2m_page_count(vm, 0);
	check_split_count(vm, 0);

	/*
	 * The guest code will first read from the first hugepage, resulting
	 * in a huge page mapping being created.
	 */
	vcpu_run(vcpu);
	check_2m_page_count(vm, 1);
	check_split_count(vm, 0);

	/*
	 * Then the guest code will read from the second hugepage, resulting
	 * in another huge page mapping being created.
	 */
	vcpu_run(vcpu);
	check_2m_page_count(vm, 2);
	check_split_count(vm, 0);

	/*
	 * Next, the guest will execute from the first huge page, causing it
	 * to be remapped at 4k.
	 *
	 * If NX huge pages are disabled, this should have no effect.
	 */
	vcpu_run(vcpu);
	check_2m_page_count(vm, disable_nx_huge_pages ? 2 : 1);
	check_split_count(vm, disable_nx_huge_pages ? 0 : 1);

	/*
	 * Executing from the third huge page (previously unaccessed) will
	 * cause part of it to be mapped at 4k.
	 *
	 * If NX huge pages are disabled, it should be mapped at 2M.
	 */
	vcpu_run(vcpu);
	check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
	check_split_count(vm, disable_nx_huge_pages ? 0 : 2);

	/* Reading from the first huge page again should have no effect. */
	vcpu_run(vcpu);
	check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
	check_split_count(vm, disable_nx_huge_pages ? 0 : 2);

	/* Give recovery thread time to run. */
	wait_for_reclaim(reclaim_period_ms);

	/*
	 * Now that the reclaimer has run, all the split pages should be gone.
	 *
	 * If NX huge pages are disabled, the reclaimer will not run, so
	 * nothing should change from here on.
	 */
	check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
	check_split_count(vm, 0);

	/*
	 * The 4k mapping on hpage 3 should have been removed, so check that
	 * reading from it causes a huge page mapping to be installed.
	 */
	vcpu_run(vcpu);
	check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 2);
	check_split_count(vm, 0);

	kvm_vm_free(vm);
}


static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-p period_ms] [-t token] [-r]\n", name);
	puts("");
	printf(" -p: The NX reclaim period in milliseconds.\n");
	printf(" -t: The magic token to indicate environment setup is done.\n");
	printf(" -r: The test has reboot permissions and can disable NX huge pages.\n");
	puts("");
	exit(0);
}

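/*
 * Example invocation (normally generated by nx_huge_pages_test.sh; the -p
 * value is illustrative only, and -r is valid only when the process actually
 * has reboot permissions):
 *
 *   ./nx_huge_pages_test -t 887563923 -p 100 -r
 */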
int main(int argc, char **argv)
{
	int reclaim_period_ms = 0, token = 0, opt;
	bool reboot_permissions = false;

	while ((opt = getopt(argc, argv, "hp:t:r")) != -1) {
		switch (opt) {
		case 'p':
			reclaim_period_ms = atoi_positive("Reclaim period", optarg);
			break;
		case 't':
			token = atoi_paranoid(optarg);
			break;
		case 'r':
			reboot_permissions = true;
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES));

	__TEST_REQUIRE(token == MAGIC_TOKEN,
		       "This test must be run with the magic token %d.\n"
		       "This is done by nx_huge_pages_test.sh, which\n"
		       "also handles environment setup for the test.", MAGIC_TOKEN);

	run_test(reclaim_period_ms, false, reboot_permissions);
	run_test(reclaim_period_ms, true, reboot_permissions);

	return 0;
}