KVM: selftests: Precisely mask off dynamic fields in CPUID test
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Thu, 3 Oct 2024 23:43:28 +0000 (16:43 -0700)
Commit:     Sean Christopherson <seanjc@google.com>
CommitDate: Fri, 1 Nov 2024 16:26:28 +0000 (09:26 -0700)
When comparing vCPU CPUID entries against KVM's supported CPUID, mask off
only the dynamic fields/bits instead of skipping the entire entry.
Precisely masking bits isn't meaningfully more difficult than skipping
entire entries, and will be necessary to maintain test coverage when a
future commit enables OSXSAVE by default, i.e. makes one bit in all of
CPUID.0x1 dynamic.

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20241003234337.273364-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
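[Editor's note] For context on what makes a CPUID bit "dynamic": the OSXSAVE feature flag, CPUID.0x1.ECX[27], architecturally mirrors CR4.OSXSAVE (bit 18), so its value can change under the guest's feet even though KVM's supported-CPUID table stays constant. Below is a minimal guest-side sketch of that behavior; it is an illustration only, not code from this patch, and it assumes a ring-0 guest context (as in the selftests' guest code) where CR4 can be written directly.

	#include <stdbool.h>
	#include <stdint.h>

	#define CR4_OSXSAVE		(1UL << 18)	/* CR4.OSXSAVE */
	#define CPUID_1_ECX_OSXSAVE	(1U << 27)	/* CPUID.0x1.ECX[27] */

	/* Read ECX output of CPUID leaf 0x1. */
	static uint32_t cpuid_1_ecx(void)
	{
		uint32_t eax = 1, ebx, ecx = 0, edx;

		__asm__ volatile("cpuid"
				 : "+a"(eax), "=b"(ebx), "+c"(ecx), "=d"(edx));
		return ecx;
	}

	/*
	 * Returns true if toggling CR4.OSXSAVE flipped the CPUID bit.
	 * Assumes XSAVE is supported by the vCPU; setting CR4.OSXSAVE
	 * without XSAVE support would #GP.
	 */
	static bool osxsave_cpuid_bit_is_dynamic(void)
	{
		unsigned long cr4;
		bool before, after;

		before = cpuid_1_ecx() & CPUID_1_ECX_OSXSAVE;

		__asm__ volatile("mov %%cr4, %0" : "=r"(cr4));
		__asm__ volatile("mov %0, %%cr4" : : "r"(cr4 ^ CR4_OSXSAVE));

		after = cpuid_1_ecx() & CPUID_1_ECX_OSXSAVE;
		return before != after;
	}

Because only one bit of CPUID.0x1 is volatile, skipping the whole entry (the old approach below) would throw away coverage of every other bit in the leaf; masking keeps them under test.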
diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
index fec03b11b0592ca18bc8f8fbe858f0c090e6f733..f7fdcef5fa59935b7751c724aab2692a5310f0c1 100644
--- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
 #include "kvm_util.h"
 #include "processor.h"
 
-/* CPUIDs known to differ */
-struct {
-       u32 function;
-       u32 index;
-} mangled_cpuids[] = {
-       /*
-        * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
-        * which are not controlled for by this test.
-        */
-       {.function = 0xd, .index = 0},
-       {.function = 0xd, .index = 1},
+struct cpuid_mask {
+       union {
+               struct {
+                       u32 eax;
+                       u32 ebx;
+                       u32 ecx;
+                       u32 edx;
+               };
+               u32 regs[4];
+       };
 };
 
 static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
@@ -56,17 +55,23 @@ static void guest_main(struct kvm_cpuid2 *guest_cpuid)
        GUEST_DONE();
 }
 
-static bool is_cpuid_mangled(const struct kvm_cpuid_entry2 *entrie)
+static struct cpuid_mask get_const_cpuid_mask(const struct kvm_cpuid_entry2 *entry)
 {
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(mangled_cpuids); i++) {
-               if (mangled_cpuids[i].function == entrie->function &&
-                   mangled_cpuids[i].index == entrie->index)
-                       return true;
+       struct cpuid_mask mask;
+
+       memset(&mask, 0xff, sizeof(mask));
+
+       switch (entry->function) {
+       case 0xd:
+               /*
+                * CPUID.0xD.{0,1}.EBX enumerate XSAVE size based on the current
+                * XCR0 and IA32_XSS MSR values.
+                */
+               if (entry->index < 2)
+                       mask.ebx = 0;
+               break;
        }
-
-       return false;
+       return mask;
 }
 
 static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
@@ -79,6 +84,8 @@ static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
                    "CPUID nent mismatch: %d vs. %d", cpuid1->nent, cpuid2->nent);
 
        for (i = 0; i < cpuid1->nent; i++) {
+               struct cpuid_mask mask;
+
                e1 = &cpuid1->entries[i];
                e2 = &cpuid2->entries[i];
 
@@ -88,15 +95,19 @@ static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
                            i, e1->function, e1->index, e1->flags,
                            e2->function, e2->index, e2->flags);
 
-               if (is_cpuid_mangled(e1))
-                       continue;
+               /* Mask off dynamic bits, e.g. OSXSAVE, when comparing entries. */
+               mask = get_const_cpuid_mask(e1);
 
-               TEST_ASSERT(e1->eax == e2->eax && e1->ebx == e2->ebx &&
-                           e1->ecx == e2->ecx && e1->edx == e2->edx,
+               TEST_ASSERT((e1->eax & mask.eax) == (e2->eax & mask.eax) &&
+                           (e1->ebx & mask.ebx) == (e2->ebx & mask.ebx) &&
+                           (e1->ecx & mask.ecx) == (e2->ecx & mask.ecx) &&
+                           (e1->edx & mask.edx) == (e2->edx & mask.edx),
                            "CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
                            e1->function, e1->index,
-                           e1->eax, e1->ebx, e1->ecx, e1->edx,
-                           e2->eax, e2->ebx, e2->ecx, e2->edx);
+                           e1->eax & mask.eax, e1->ebx & mask.ebx,
+                           e1->ecx & mask.ecx, e1->edx & mask.edx,
+                           e2->eax & mask.eax, e2->ebx & mask.ebx,
+                           e2->ecx & mask.ecx, e2->edx & mask.edx);
        }
 }
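
[Editor's note] To see how the new helper scales, here is a hypothetical sketch of the extension the commit message alludes to, once OSXSAVE becomes dynamic in CPUID.0x1. The `case 0x1` arm is an assumption about the future commit, not code from this one; the `case 0xd` arm is taken verbatim from the patch above.

	static struct cpuid_mask get_const_cpuid_mask(const struct kvm_cpuid_entry2 *entry)
	{
		struct cpuid_mask mask;

		memset(&mask, 0xff, sizeof(mask));

		switch (entry->function) {
		case 0x1:
			/*
			 * Hypothetical: once KVM enables OSXSAVE by default,
			 * CPUID.0x1.ECX[27] follows CR4.OSXSAVE and must be
			 * masked off in all CPUID.0x1 entries.
			 */
			mask.ecx = ~(1u << 27);
			break;
		case 0xd:
			/*
			 * CPUID.0xD.{0,1}.EBX enumerate XSAVE size based on the
			 * current XCR0 and IA32_XSS MSR values.
			 */
			if (entry->index < 2)
				mask.ebx = 0;
			break;
		}
		return mask;
	}

Each new dynamic field costs one switch arm that zeroes only the affected bits, while compare_cpuids() stays unchanged; under the old scheme it would instead have grown another mangled_cpuids[] entry that blinded the test to the entire leaf.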