KVM: selftests: Extend vmx_close_while_nested_test to cover SVM
author    Yosry Ahmed <yosry.ahmed@linux.dev>
          Tue, 21 Oct 2025 07:47:16 +0000 (07:47 +0000)
committer Sean Christopherson <seanjc@google.com>
          Fri, 21 Nov 2025 00:19:53 +0000 (16:19 -0800)
Add SVM L1 code to run the nested guest, and allow the test to run with
SVM as well as VMX.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251021074736.1324328-4-yosry.ahmed@linux.dev
[sean: rename to "nested_close_kvm_test" to provide nested_* sorting]
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/Makefile.kvm
tools/testing/selftests/kvm/x86/nested_close_kvm_test.c [moved from tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c with 64% similarity]
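Before the hunks, the shape of the change in brief: the test now branches on the CPU vendor twice, once in host userspace with kvm_cpu_has() to decide which nested-state blob to allocate, and once inside the L1 guest with this_cpu_has() to pick the matching entry path. A condensed sketch, not new code: every identifier below is an existing selftest API that appears in the diff.

	/* Host side: allocate vendor-specific nested state, pass its GVA. */
	if (kvm_cpu_has(X86_FEATURE_VMX))
		vcpu_alloc_vmx(vm, &guest_gva);	/* struct vmx_pages */
	else
		vcpu_alloc_svm(vm, &guest_gva);	/* struct svm_test_data */
	vcpu_args_set(vcpu, 1, guest_gva);	/* becomes l1_guest_code()'s first argument */

	/* Guest side: interpret the opaque pointer per the CPU vendor. */
	static void l1_guest_code(void *data)
	{
		if (this_cpu_has(X86_FEATURE_VMX))
			l1_vmx_code(data);	/* VMLAUNCH into l2_guest_code */
		else
			l1_svm_code(data);	/* VMRUN into l2_guest_code */
	}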

diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index 148d427ff24befa7e144d9214feb3e34018b7d10..89ad8c82a7b20d51259815cefefb84615d3c4449 100644
@@ -88,6 +88,7 @@ TEST_GEN_PROGS_x86 += x86/kvm_pv_test
 TEST_GEN_PROGS_x86 += x86/kvm_buslock_test
 TEST_GEN_PROGS_x86 += x86/monitor_mwait_test
 TEST_GEN_PROGS_x86 += x86/msrs_test
+TEST_GEN_PROGS_x86 += x86/nested_close_kvm_test
 TEST_GEN_PROGS_x86 += x86/nested_emulation_test
 TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
 TEST_GEN_PROGS_x86 += x86/platform_info_test
@@ -111,7 +112,6 @@ TEST_GEN_PROGS_x86 += x86/ucna_injection_test
 TEST_GEN_PROGS_x86 += x86/userspace_io_test
 TEST_GEN_PROGS_x86 += x86/userspace_msr_exit_test
 TEST_GEN_PROGS_x86 += x86/vmx_apic_access_test
-TEST_GEN_PROGS_x86 += x86/vmx_close_while_nested_test
 TEST_GEN_PROGS_x86 += x86/vmx_dirty_log_test
 TEST_GEN_PROGS_x86 += x86/vmx_exception_with_invalid_guest_state
 TEST_GEN_PROGS_x86 += x86/vmx_msrs_test
diff --git a/tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
similarity index 64%
rename from tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c
rename to tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
index dad988351493e089bf0f9b62d7604d89b65137a7..f001cb836bfa38e9abb5ab678fdcbe831baaf1c5 100644
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * vmx_close_while_nested
- *
  * Copyright (C) 2019, Red Hat, Inc.
  *
  * Verify that nothing bad happens if a KVM user exits with open
@@ -12,6 +10,7 @@
 #include "kvm_util.h"
 #include "processor.h"
 #include "vmx.h"
+#include "svm_util.h"
 
 #include <string.h>
 #include <sys/ioctl.h>
@@ -22,6 +21,8 @@ enum {
        PORT_L0_EXIT = 0x2000,
 };
 
+#define L2_GUEST_STACK_SIZE 64
+
 static void l2_guest_code(void)
 {
        /* Exit to L0 */
@@ -29,9 +30,8 @@ static void l2_guest_code(void)
                     : : [port] "d" (PORT_L0_EXIT) : "rax");
 }
 
-static void l1_guest_code(struct vmx_pages *vmx_pages)
+static void l1_vmx_code(struct vmx_pages *vmx_pages)
 {
-#define L2_GUEST_STACK_SIZE 64
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
        GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
@@ -45,19 +45,43 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
        GUEST_ASSERT(0);
 }
 
+static void l1_svm_code(struct svm_test_data *svm)
+{
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+       /* Prepare the VMCB for L2 execution. */
+       generic_svm_setup(svm, l2_guest_code,
+                         &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       run_guest(svm->vmcb, svm->vmcb_gpa);
+       GUEST_ASSERT(0);
+}
+
+static void l1_guest_code(void *data)
+{
+       if (this_cpu_has(X86_FEATURE_VMX))
+               l1_vmx_code(data);
+       else
+               l1_svm_code(data);
+}
+
 int main(int argc, char *argv[])
 {
-       vm_vaddr_t vmx_pages_gva;
+       vm_vaddr_t guest_gva;
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
 
-       TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+       TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+                    kvm_cpu_has(X86_FEATURE_SVM));
 
        vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
 
-       /* Allocate VMX pages and shared descriptors (vmx_pages). */
-       vcpu_alloc_vmx(vm, &vmx_pages_gva);
-       vcpu_args_set(vcpu, 1, vmx_pages_gva);
+       if (kvm_cpu_has(X86_FEATURE_VMX))
+               vcpu_alloc_vmx(vm, &guest_gva);
+       else
+               vcpu_alloc_svm(vm, &guest_gva);
+
+       vcpu_args_set(vcpu, 1, guest_gva);
 
        for (;;) {
                volatile struct kvm_run *run = vcpu->run;
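The hunk, and with it the patch, ends here; the body of the run loop is untouched. For context, a sketch of how the loop plausibly completes, under the assumption that it matches the pre-rename vmx_close_while_nested_test (TEST_ASSERT_KVM_EXIT_REASON, get_ucall(), and REPORT_GUEST_ASSERT() are existing selftest helpers; this is not part of the diff above):

		struct ucall uc;

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		/* L2's port read surfaced in L0 userspace: stop the loop. */
		if (run->io.port == PORT_L0_EXIT)
			break;

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);	/* a GUEST_ASSERT() fired in the guest */
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}
	/*
	 * Deliberately no kvm_vm_free(): the test's whole point is to exit
	 * with the nested guest still active and verify that kernel
	 * teardown copes, i.e. that "nothing bad happens".
	 */
}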