KVM: selftests: Extend vmx_tsc_adjust_test to cover SVM
author    Yosry Ahmed <yosry.ahmed@linux.dev>
          Tue, 21 Oct 2025 07:47:20 +0000 (07:47 +0000)
committer Sean Christopherson <seanjc@google.com>
          Fri, 21 Nov 2025 00:19:56 +0000 (16:19 -0800)
Add SVM L1 code to run the nested guest, and allow the test to run with
SVM as well as VMX.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251021074736.1324328-8-yosry.ahmed@linux.dev
Signed-off-by: Sean Christopherson <seanjc@google.com>
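
[Editor's note: a minimal userspace model, not part of this patch, of the IA32_TSC_ADJUST
bookkeeping the test relies on. Architecturally, a WRMSR to IA32_TSC that moves the TSC
by some delta X also moves IA32_TSC_ADJUST by X. L1 writes rdtsc() - TSC_ADJUST_VALUE,
and the final check of -2 * TSC_ADJUST_VALUE implies L2's (elided) non-intercepted write
shifts it by the same amount again. The sketch below ignores TSC_OFFSET and all
virtualization details; wrmsr_tsc() and the state variables are illustrative names,
not from the selftest.]

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TSC_ADJUST_VALUE (1ll << 32)

	/* Illustrative state: the free-running TSC and IA32_TSC_ADJUST. */
	static uint64_t tsc = 123456789;
	static int64_t tsc_adjust;

	/* Model of WRMSR(MSR_IA32_TSC): the adjust MSR tracks the applied delta. */
	static void wrmsr_tsc(uint64_t val)
	{
		tsc_adjust += (int64_t)(val - tsc);
		tsc = val;
	}

	int main(void)
	{
		/* L1's write: TSC moves down by TSC_ADJUST_VALUE. */
		wrmsr_tsc(tsc - TSC_ADJUST_VALUE);
		assert(tsc_adjust == -TSC_ADJUST_VALUE);

		/* L2's non-intercepted write has the same net effect on L1's view. */
		wrmsr_tsc(tsc - TSC_ADJUST_VALUE);
		assert(tsc_adjust == -2 * TSC_ADJUST_VALUE);

		printf("IA32_TSC_ADJUST model final value: %lld\n",
		       (long long)tsc_adjust);
		return 0;
	}

Both asserts pass, matching the check_ia32_tsc_adjust() calls in the test body below.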
tools/testing/selftests/kvm/Makefile.kvm
tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c [moved from tools/testing/selftests/kvm/x86/vmx_tsc_adjust_test.c with 61% similarity]

index 5bfd242da1ca583e51baf0818b36515e3c41ae74..3127983c128527334d344d588625e8738d853a25 100644 (file)
@@ -92,6 +92,7 @@ TEST_GEN_PROGS_x86 += x86/nested_close_kvm_test
 TEST_GEN_PROGS_x86 += x86/nested_emulation_test
 TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
 TEST_GEN_PROGS_x86 += x86/nested_invalid_cr3_test
+TEST_GEN_PROGS_x86 += x86/nested_tsc_adjust_test
 TEST_GEN_PROGS_x86 += x86/nested_tsc_scaling_test
 TEST_GEN_PROGS_x86 += x86/platform_info_test
 TEST_GEN_PROGS_x86 += x86/pmu_counters_test
@@ -119,7 +120,6 @@ TEST_GEN_PROGS_x86 += x86/vmx_exception_with_invalid_guest_state
 TEST_GEN_PROGS_x86 += x86/vmx_msrs_test
 TEST_GEN_PROGS_x86 += x86/vmx_invalid_nested_guest_state
 TEST_GEN_PROGS_x86 += x86/vmx_set_nested_state_test
-TEST_GEN_PROGS_x86 += x86/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86 += x86/apic_bus_clock_test
 TEST_GEN_PROGS_x86 += x86/xapic_ipi_test
 TEST_GEN_PROGS_x86 += x86/xapic_state_test
similarity index 61%
rename from tools/testing/selftests/kvm/x86/vmx_tsc_adjust_test.c
rename to tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c
index 2dcc0306a0d9b66ca8f4498885d05c9d7e6712e5..2839f650e5c9daac9646faf5b7d974bb2e9e1cc6 100644 (file)
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * vmx_tsc_adjust_test
- *
  * Copyright (C) 2018, Google LLC.
  *
  * IA32_TSC_ADJUST test
@@ -22,6 +20,7 @@
 #include "kvm_util.h"
 #include "processor.h"
 #include "vmx.h"
+#include "svm_util.h"
 
 #include <string.h>
 #include <sys/ioctl.h>
@@ -35,6 +34,8 @@
 #define TSC_ADJUST_VALUE (1ll << 32)
 #define TSC_OFFSET_VALUE -(1ll << 48)
 
+#define L2_GUEST_STACK_SIZE 64
+
 enum {
        PORT_ABORT = 0x1000,
        PORT_REPORT,
@@ -72,32 +73,47 @@ static void l2_guest_code(void)
        __asm__ __volatile__("vmcall");
 }
 
-static void l1_guest_code(struct vmx_pages *vmx_pages)
+static void l1_guest_code(void *data)
 {
-#define L2_GUEST_STACK_SIZE 64
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
-       uint32_t control;
 
+       /* Set TSC from L1 and make sure TSC_ADJUST is updated correctly */
        GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
        wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
        check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
 
-       GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
-       GUEST_ASSERT(load_vmcs(vmx_pages));
-
-       /* Prepare the VMCS for L2 execution. */
-       prepare_vmcs(vmx_pages, l2_guest_code,
-                    &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-       control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
-       control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
-       vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
-       vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
-
-       GUEST_ASSERT(!vmlaunch());
-       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+       /*
+        * Run L2 with TSC_OFFSET. L2 will write to TSC, and L1 is not
+        * intercepting the write so it should update L1's TSC_ADJUST.
+        */
+       if (this_cpu_has(X86_FEATURE_VMX)) {
+               struct vmx_pages *vmx_pages = data;
+               uint32_t control;
+
+               GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+               GUEST_ASSERT(load_vmcs(vmx_pages));
+
+               prepare_vmcs(vmx_pages, l2_guest_code,
+                            &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+               control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
+               control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
+               vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
+               vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
+
+               GUEST_ASSERT(!vmlaunch());
+               GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+       } else {
+               struct svm_test_data *svm = data;
+
+               generic_svm_setup(svm, l2_guest_code,
+                                 &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+               svm->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
+               run_guest(svm->vmcb, svm->vmcb_gpa);
+               GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+       }
 
        check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
-
        GUEST_DONE();
 }
 
@@ -109,16 +125,19 @@ static void report(int64_t val)
 
 int main(int argc, char *argv[])
 {
-       vm_vaddr_t vmx_pages_gva;
+       vm_vaddr_t nested_gva;
        struct kvm_vcpu *vcpu;
 
-       TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+       TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+                    kvm_cpu_has(X86_FEATURE_SVM));
 
-       vm = vm_create_with_one_vcpu(&vcpu, (void *) l1_guest_code);
+       vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+       if (kvm_cpu_has(X86_FEATURE_VMX))
+               vcpu_alloc_vmx(vm, &nested_gva);
+       else
+               vcpu_alloc_svm(vm, &nested_gva);
 
-       /* Allocate VMX pages and shared descriptors (vmx_pages). */
-       vcpu_alloc_vmx(vm, &vmx_pages_gva);
-       vcpu_args_set(vcpu, 1, vmx_pages_gva);
+       vcpu_args_set(vcpu, 1, nested_gva);
 
        for (;;) {
                struct ucall uc;