/*
 * tools/testing/selftests/kvm/x86_64/hyperv_features.c
 * (thirdparty/kernel/stable.git; retrieved via git.ipfire.org gitweb)
 */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2021, Red Hat, Inc.
4 *
5 * Tests for Hyper-V features enablement
6 */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
9 #include <stdint.h>
10
11 #include "test_util.h"
12 #include "kvm_util.h"
13 #include "processor.h"
14 #include "hyperv.h"
15
/*
 * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf
 * but to activate the feature it is sufficient to set it to a non-zero
 * value. Use BIT(0) for that.
 */
#define HV_PV_SPINLOCKS_TEST \
	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
23
/*
 * Per-stage MSR test description, written by the host into a page shared
 * with the guest (see guest_test_msrs_access() / guest_msr()).
 *
 * Use uint64_t for write_val to match the file's other declarations
 * (struct hcall_data, guest_msr()'s msr_val) instead of the kernel-style
 * u64 alias; <stdint.h> is already included.
 */
struct msr_data {
	uint32_t idx;		/* MSR index to access */
	bool fault_expected;	/* access is expected to raise #GP */
	bool write;		/* WRMSR (then read back) instead of RDMSR */
	uint64_t write_val;	/* value to write when 'write' is set */
};
30
/*
 * Per-stage hypercall test description, written by the host into a page
 * shared with the guest (see guest_test_hcalls_access() / guest_hcall()).
 */
struct hcall_data {
	uint64_t control;	/* hypercall control word (code + flags) */
	uint64_t expect;	/* expected HV_STATUS_* result */
	bool ud_expected;	/* expect #UD instead of a completed call */
};
36
37 static bool is_write_only_msr(uint32_t msr)
38 {
39 return msr == HV_X64_MSR_EOI;
40 }
41
/*
 * Guest entry point for the MSR tests: performs the WRMSR and/or RDMSR
 * described by 'msr' (filled in by guest_test_msrs_access() on the host
 * side) and asserts that the access faults, or succeeds, as expected.
 */
static void guest_msr(struct msr_data *msr)
{
	uint8_t vector = 0;
	uint64_t msr_val = 0;

	/* The host must have programmed an MSR index for this stage. */
	GUEST_ASSERT(msr->idx);

	if (msr->write)
		vector = wrmsr_safe(msr->idx, msr->write_val);

	/*
	 * Read the MSR back unless the write already faulted or the MSR is
	 * write-only (reading it would fault for an unrelated reason).
	 */
	if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
		vector = rdmsr_safe(msr->idx, &msr_val);

	if (msr->fault_expected)
		__GUEST_ASSERT(vector == GP_VECTOR,
			       "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
			       msr->write ? "WR" : "RD", msr->idx, vector);
	else
		__GUEST_ASSERT(!vector,
			       "Expected success on %sMSR(0x%x), got vector '0x%x'",
			       msr->write ? "WR" : "RD", msr->idx, vector);

	/* Nothing was read back, so there is no value to validate. */
	if (vector || is_write_only_msr(msr->idx))
		goto done;

	/*
	 * NOTE(review): 'vector' is provably zero here (checked just above),
	 * so this assertion can never fire; the format string suggests it
	 * was meant to compare 'msr_val' against 'write_val' — confirm
	 * against upstream before changing.
	 */
	if (msr->write)
		__GUEST_ASSERT(!vector,
			       "WRMSR(0x%x) to '0x%lx', RDMSR read '0x%lx'",
			       msr->idx, msr->write_val, msr_val);

	/* Invariant TSC bit appears when TSC invariant control MSR is written to */
	if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
		if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));
		else
			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
				     !!(msr_val & HV_INVARIANT_TSC_EXPOSED));
	}

done:
	GUEST_DONE();
}
84
85 static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
86 {
87 u64 res, input, output;
88 uint8_t vector;
89
90 GUEST_ASSERT_NE(hcall->control, 0);
91
92 wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
93 wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
94
95 if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
96 input = pgs_gpa;
97 output = pgs_gpa + 4096;
98 } else {
99 input = output = 0;
100 }
101
102 vector = __hyperv_hypercall(hcall->control, input, output, &res);
103 if (hcall->ud_expected) {
104 __GUEST_ASSERT(vector == UD_VECTOR,
105 "Expected #UD for control '%lu', got vector '0x%x'",
106 hcall->control, vector);
107 } else {
108 __GUEST_ASSERT(!vector,
109 "Expected no exception for control '%lu', got vector '0x%x'",
110 hcall->control, vector);
111 GUEST_ASSERT_EQ(res, hcall->expect);
112 }
113
114 GUEST_DONE();
115 }
116
117 static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
118 {
119 /*
120 * Enable all supported Hyper-V features, then clear the leafs holding
121 * the features that will be tested one by one.
122 */
123 vcpu_set_hv_cpuid(vcpu);
124
125 vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
126 vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
127 vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
128 }
129
/*
 * Host side of the MSR tests: iterate over numbered stages.  Each stage
 * creates a fresh VM, restores the CPUID state accumulated by previous
 * stages (so vcpu_set_cpuid_feature() calls are cumulative), describes one
 * MSR access in the shared 'msr' page, then runs guest_msr() to perform
 * and validate it.  The loop ends when 'stage' runs past the last case.
 */
static void guest_test_msrs_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t msr_gva;
	struct msr_data *msr;
	bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_msr);

		/* Page shared with the guest carrying this stage's msr_data. */
		msr_gva = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
		msr = addr_gva2hva(vm, msr_gva);

		vcpu_args_set(vcpu, 1, msr_gva);
		/* Make KVM honor the guest-visible Hyper-V CPUID bits. */
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		/* TODO: Make this entire test easier to maintain. */
		if (stage >= 21)
			vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);

		switch (stage) {
		case 0:
			/*
			 * Only available when Hyper-V identification is set
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 1:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 2:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
			/*
			 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
			 * HV_X64_MSR_HYPERCALL available.
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = true;
			msr->write_val = HYPERV_LINUX_OS_ID;
			msr->fault_expected = false;
			break;
		case 3:
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 4:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = false;
			msr->fault_expected = false;
			break;

		case 5:
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 6:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE);
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 7:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 8:
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 9:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 10:
			/* Read only */
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 11:
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 12:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE);
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 13:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 14:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 15:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE);
			msr->idx = HV_X64_MSR_RESET;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 16:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = true;
			/*
			 * TODO: the test only writes '0' to HV_X64_MSR_RESET
			 * at the moment, writing some other value there will
			 * trigger real vCPU reset and the code is not prepared
			 * to handle it yet.
			 */
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 17:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 18:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE);
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 19:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 20:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 21:
			/*
			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
			 * capability enabled and guest visible CPUID bit unset.
			 */
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 22:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE);
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 23:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 24:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 25:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE);
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 26:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;
		case 27:
			/* Direct mode test */
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 1 << 12;
			msr->fault_expected = true;
			break;
		case 28:
			vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE);
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 1 << 12;
			msr->fault_expected = false;
			break;

		case 29:
			msr->idx = HV_X64_MSR_EOI;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 30:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE);
			msr->idx = HV_X64_MSR_EOI;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		case 31:
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 32:
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS);
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 33:
			/* Read only */
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 34:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 35:
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT);
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 36:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;
		case 37:
			/* Can only write '0' */
			msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 38:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 39:
			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE);
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 40:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		case 41:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 42:
			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE);
			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 43:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 44:
			/* MSR is not available when CPUID feature bit is unset */
			if (!has_invtsc)
				continue;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 45:
			/* MSR is available when CPUID feature bit is set */
			if (!has_invtsc)
				continue;
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 46:
			/* Writing bits other than 0 is forbidden */
			if (!has_invtsc)
				continue;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = true;
			msr->write_val = 0xdeadbeef;
			msr->fault_expected = true;
			break;
		case 47:
			/* Setting bit 0 enables the feature */
			if (!has_invtsc)
				continue;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		default:
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Remember the CPUID state so the next stage builds on it. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
			 msr->idx, msr->write ? "write" : "read");

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
520
/*
 * Host side of the hypercall tests: iterate over numbered stages.  Each
 * stage creates a fresh VM, restores the CPUID state accumulated by
 * previous stages, describes one hypercall in the shared 'hcall' page,
 * then runs guest_hcall() to issue and validate it.  Stage 21 terminates
 * the loop.
 */
static void guest_test_hcalls_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t hcall_page, hcall_params;
	struct hcall_data *hcall;

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vcpu);

		/* Hypercall input/output */
		hcall_page = vm_vaddr_alloc_pages(vm, 2);
		memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

		/* Page shared with the guest carrying this stage's hcall_data. */
		hcall_params = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
		hcall = addr_gva2hva(vm, hcall_params);

		vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
		/* Make KVM honor the guest-visible Hyper-V CPUID bits. */
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		switch (stage) {
		case 0:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
			hcall->control = 0xbeef;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;

		case 1:
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 2:
			vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES);
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 3:
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 4:
			vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS);
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 5:
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		case 6:
			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 7:
			vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING);
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_OPERATION_DENIED;
			break;

		case 8:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 9:
			vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 10:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 11:
			vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 12:
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 13:
			vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED);
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		case 14:
			/* Nothing in 'sparse banks' -> success */
			hcall->control = HVCALL_SEND_IPI_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 15:
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 16:
			vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST);
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 17:
			/* XMM fast hypercall */
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = true;
			break;
		case 18:
			vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = false;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 19:
			hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 20:
			vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS);
			hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT;
			hcall->expect = HV_STATUS_INVALID_PARAMETER;
			break;
		case 21:
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Remember the CPUID state so the next stage builds on it. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
690
/* Run both test groups; any failure aborts via the TEST_* machinery. */
int main(void)
{
	pr_info("Testing access to Hyper-V specific MSRs\n");
	guest_test_msrs_access();

	pr_info("Testing access to Hyper-V hypercalls\n");
	guest_test_hcalls_access();

	return 0;
}