KVM: selftests: Extend memstress to run on nested SVM
author     Yosry Ahmed <yosry.ahmed@linux.dev>
           Tue, 30 Dec 2025 23:01:48 +0000 (15:01 -0800)
committer  Sean Christopherson <seanjc@google.com>
           Thu, 8 Jan 2026 20:02:16 +0000 (12:02 -0800)
Add L1 SVM code and generalize the setup code to work for both VMX and
SVM. This allows running 'dirty_log_perf_test -n' on AMD CPUs.
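
In outline, the common L1 entry point now selects the vendor-specific setup at
run time; this sketch simply mirrors the hunks below:

  static void memstress_l1_guest_code(void *data, uint64_t vcpu_id)
  {
          if (this_cpu_has(X86_FEATURE_VMX))
                  l1_vmx_code(data, vcpu_id);     /* VMLAUNCH into L2 */
          else
                  l1_svm_code(data, vcpu_id);     /* VMRUN into L2 */
  }

On AMD hosts, where TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX)) previously caused
the nested test to be skipped, '-n' now allocates SVM state via vcpu_alloc_svm()
and enters L2 with VMRUN.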

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251230230150.4150236-20-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/lib/x86/memstress.c

index 407abfc34909da9fa2f135391b1eab5f25f9ee2f..86f4c5e4c43087dac2ab3aebdaf94f25f178e910 100644
@@ -13,6 +13,7 @@
 #include "kvm_util.h"
 #include "memstress.h"
 #include "processor.h"
+#include "svm_util.h"
 #include "vmx.h"
 
 void memstress_l2_guest_code(uint64_t vcpu_id)
@@ -29,9 +30,10 @@ __asm__(
 "      ud2;"
 );
 
-static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
-{
 #define L2_GUEST_STACK_SIZE 64
+
+static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id)
+{
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
        unsigned long *rsp;
 
@@ -45,10 +47,34 @@ static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
        prepare_vmcs(vmx, memstress_l2_guest_entry, rsp);
 
        GUEST_ASSERT(!vmlaunch());
-       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+       GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_VMCALL);
+       GUEST_DONE();
+}
+
+static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id)
+{
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+       unsigned long *rsp;
+
+
+       rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
+       *rsp = vcpu_id;
+       generic_svm_setup(svm, memstress_l2_guest_entry, rsp);
+
+       run_guest(svm->vmcb, svm->vmcb_gpa);
+       GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);
        GUEST_DONE();
 }
 
+
+static void memstress_l1_guest_code(void *data, uint64_t vcpu_id)
+{
+       if (this_cpu_has(X86_FEATURE_VMX))
+               l1_vmx_code(data, vcpu_id);
+       else
+               l1_svm_code(data, vcpu_id);
+}
+
 uint64_t memstress_nested_pages(int nr_vcpus)
 {
        /*
@@ -78,15 +104,17 @@ static void memstress_setup_ept_mappings(struct kvm_vm *vm)
 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
 {
        struct kvm_regs regs;
-       vm_vaddr_t vmx_gva;
+       vm_vaddr_t nested_gva;
        int vcpu_id;
 
-       TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
        TEST_REQUIRE(kvm_cpu_has_tdp());
 
        vm_enable_tdp(vm);
        for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-               vcpu_alloc_vmx(vm, &vmx_gva);
+               if (kvm_cpu_has(X86_FEATURE_VMX))
+                       vcpu_alloc_vmx(vm, &nested_gva);
+               else
+                       vcpu_alloc_svm(vm, &nested_gva);
 
                /* The EPTs are shared across vCPUs, setup the mappings once */
                if (vcpu_id == 0)
@@ -99,6 +127,6 @@ void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
                vcpu_regs_get(vcpus[vcpu_id], &regs);
                regs.rip = (unsigned long) memstress_l1_guest_code;
                vcpu_regs_set(vcpus[vcpu_id], &regs);
-               vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
+               vcpu_args_set(vcpus[vcpu_id], 2, nested_gva, vcpu_id);
        }
 }