KVM: selftests: Test TPR / CR8 sync and interrupt masking
author    Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
          Fri, 5 Dec 2025 22:49:37 +0000 (14:49 -0800)
committer Sean Christopherson <seanjc@google.com>
          Thu, 8 Jan 2026 18:50:50 +0000 (10:50 -0800)
Add a few extra TPR / CR8 tests to x86's xapic_state_test to verify that:
  * TPR is 0 on reset,
  * TPR, PPR, and CR8 are equal inside the guest,
  * TPR and CR8 read equal by the host after a VM-Exit,
  * borderline TPR values set by the host correctly mask interrupts in the
    guest.

These will hopefully catch the most obvious cases of improper TPR
synchronization or interrupt masking.

Run these tests in both xAPIC and x2APIC modes.  The x2APIC mode uses the
SELF_IPI register to trigger interrupts, giving that register a bit of
exercise too.
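
As a reference for the borderline cases, here is a minimal sketch of the
architectural rule being exercised (not part of the patch; the helper name
is hypothetical): with no interrupt in service, a fixed interrupt stays
pending while its vector's priority class (vector bits 7:4) is at most the
class held in TPR bits 7:4, and CR8 reads back exactly those four TPR bits.

    /* Hypothetical helper modeling the masking rule; not in the patch. */
    static inline bool irq_masked_by_tpr(uint8_t vector, uint8_t tpr_class)
    {
            /* vector[7:4] is the priority class; class <= TPR class => masked */
            return (vector >> 4) <= tpr_class;
    }

For IRQ_VECTOR 0x20 (priority class 2), a TPR class of 2 masks the interrupt
and a class of 1 unmasks it; those are exactly the two values the host
writes in test_tpr_set_tpr_for_irq() below.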

Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Acked-by: Naveen N Rao (AMD) <naveen@kernel.org>
[sean: put code in separate test]
Link: https://patch.msgid.link/20251205224937.428122-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/Makefile.kvm
tools/testing/selftests/kvm/include/x86/apic.h
tools/testing/selftests/kvm/x86/xapic_tpr_test.c [new file with mode: 0644]

index ba5c2b643efaa263f90185b72fe59ecb0fcd6dbc..3789890421bdb10104f13cfef052dbbe21d82a0b 100644 (file)
@@ -124,6 +124,7 @@ TEST_GEN_PROGS_x86 += x86/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86 += x86/apic_bus_clock_test
 TEST_GEN_PROGS_x86 += x86/xapic_ipi_test
 TEST_GEN_PROGS_x86 += x86/xapic_state_test
+TEST_GEN_PROGS_x86 += x86/xapic_tpr_test
 TEST_GEN_PROGS_x86 += x86/xcr0_cpuid_test
 TEST_GEN_PROGS_x86 += x86/xss_msr_test
 TEST_GEN_PROGS_x86 += x86/debug_regs
index 80fe9f69b38db069fee98766faab0bdcff8dcf5f..e9b9aebaac9731abd43b67827b36c52d4cfebb86 100644 (file)
@@ -28,6 +28,8 @@
 #define                GET_APIC_ID_FIELD(x)    (((x) >> 24) & 0xFF)
 #define        APIC_TASKPRI    0x80
 #define        APIC_PROCPRI    0xA0
+#define        GET_APIC_PRI(x) (((x) & GENMASK(7, 4)) >> 4)
+#define        SET_APIC_PRI(x, y) (((x) & ~GENMASK(7, 4)) | ((y) << 4))
 #define        APIC_EOI        0xB0
 #define        APIC_SPIV       0xF0
 #define                APIC_SPIV_FOCUS_DISABLED        (1 << 9)
@@ -67,6 +69,7 @@
 #define        APIC_TMICT      0x380
 #define        APIC_TMCCT      0x390
 #define        APIC_TDCR       0x3E0
+#define        APIC_SELF_IPI   0x3F0
 
 void apic_disable(void);
 void xapic_enable(void);
diff --git a/tools/testing/selftests/kvm/x86/xapic_tpr_test.c b/tools/testing/selftests/kvm/x86/xapic_tpr_test.c
new file mode 100644 (file)
index 0000000..3862134
--- /dev/null
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <fcntl.h>
+#include <stdatomic.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include "apic.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+static bool is_x2apic;
+
+#define IRQ_VECTOR 0x20
+
+/* See also the comment at similar assertion in memslot_perf_test.c */
+static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");
+
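+/*
+ * Counter bumped by the guest IRQ handler; the guest polls it to detect
+ * (or rule out) interrupt delivery.
+ */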
+static atomic_uint tpr_guest_irq_sync_val;
+
+static void tpr_guest_irq_sync_flag_reset(void)
+{
+       atomic_store_explicit(&tpr_guest_irq_sync_val, 0,
+                             memory_order_release);
+}
+
+static unsigned int tpr_guest_irq_sync_val_get(void)
+{
+       return atomic_load_explicit(&tpr_guest_irq_sync_val,
+                                   memory_order_acquire);
+}
+
+static void tpr_guest_irq_sync_val_inc(void)
+{
+       atomic_fetch_add_explicit(&tpr_guest_irq_sync_val, 1,
+                                 memory_order_acq_rel);
+}
+
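+/* IRQ handlers: record the delivery, then EOI via the matching APIC mode. */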
+static void tpr_guest_irq_handler_xapic(struct ex_regs *regs)
+{
+       tpr_guest_irq_sync_val_inc();
+
+       xapic_write_reg(APIC_EOI, 0);
+}
+
+static void tpr_guest_irq_handler_x2apic(struct ex_regs *regs)
+{
+       tpr_guest_irq_sync_val_inc();
+
+       x2apic_write_reg(APIC_EOI, 0);
+}
+
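+/*
+ * Queue a self-IPI: x2APIC has a dedicated SELF_IPI register, whereas
+ * xAPIC must program ICR/ICR2 with the self-destination shorthand.
+ */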
+static void tpr_guest_irq_queue(void)
+{
+       if (is_x2apic) {
+               x2apic_write_reg(APIC_SELF_IPI, IRQ_VECTOR);
+       } else {
+               uint32_t icr, icr2;
+
+               icr = APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED |
+                       IRQ_VECTOR;
+               icr2 = 0;
+
+               xapic_write_reg(APIC_ICR2, icr2);
+               xapic_write_reg(APIC_ICR, icr);
+       }
+}
+
+static uint8_t tpr_guest_tpr_get(void)
+{
+       uint32_t taskpri;
+
+       if (is_x2apic)
+               taskpri = x2apic_read_reg(APIC_TASKPRI);
+       else
+               taskpri = xapic_read_reg(APIC_TASKPRI);
+
+       return GET_APIC_PRI(taskpri);
+}
+
+static uint8_t tpr_guest_ppr_get(void)
+{
+       uint32_t procpri;
+
+       if (is_x2apic)
+               procpri = x2apic_read_reg(APIC_PROCPRI);
+       else
+               procpri = xapic_read_reg(APIC_PROCPRI);
+
+       return GET_APIC_PRI(procpri);
+}
+
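+/* CR8 architecturally mirrors TPR[7:4], i.e. the task priority class. */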
+static uint8_t tpr_guest_cr8_get(void)
+{
+       uint64_t cr8;
+
+       asm volatile ("mov %%cr8, %[cr8]\n\t" : [cr8] "=r"(cr8));
+
+       return cr8 & GENMASK(3, 0);
+}
+
+static void tpr_guest_check_tpr_ppr_cr8_equal(void)
+{
+       uint8_t tpr;
+
+       tpr = tpr_guest_tpr_get();
+
+       GUEST_ASSERT_EQ(tpr_guest_ppr_get(), tpr);
+       GUEST_ASSERT_EQ(tpr_guest_cr8_get(), tpr);
+}
+
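+/*
+ * Guest flow: verify reset state, then step through IF=0 masking, IF=1
+ * delivery, borderline-TPR masking, and borderline-TPR delivery, with the
+ * host adjusting TPR at each GUEST_SYNC().
+ */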
+static void tpr_guest_code(void)
+{
+       cli();
+
+       if (is_x2apic)
+               x2apic_enable();
+       else
+               xapic_enable();
+
+       GUEST_ASSERT_EQ(tpr_guest_tpr_get(), 0);
+       tpr_guest_check_tpr_ppr_cr8_equal();
+
+       tpr_guest_irq_queue();
+
+       /* TPR = 0 but IRQ masked by IF=0, should not fire */
+       udelay(1000);
+       GUEST_ASSERT_EQ(tpr_guest_irq_sync_val_get(), 0);
+
+       sti();
+
+       /* IF=1 now, IRQ should fire */
+       while (tpr_guest_irq_sync_val_get() == 0)
+               cpu_relax();
+       GUEST_ASSERT_EQ(tpr_guest_irq_sync_val_get(), 1);
+
+       GUEST_SYNC(true);
+       tpr_guest_check_tpr_ppr_cr8_equal();
+
+       tpr_guest_irq_queue();
+
+       /* IRQ masked by barely high enough TPR now, should not fire */
+       udelay(1000);
+       GUEST_ASSERT_EQ(tpr_guest_irq_sync_val_get(), 1);
+
+       GUEST_SYNC(false);
+       tpr_guest_check_tpr_ppr_cr8_equal();
+
+       /* TPR barely low enough now to unmask IRQ, should fire */
+       while (tpr_guest_irq_sync_val_get() == 1)
+               cpu_relax();
+       GUEST_ASSERT_EQ(tpr_guest_irq_sync_val_get(), 2);
+
+       GUEST_DONE();
+}
+
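+/* Host-side TPR accessors, operating on KVM_{GET,SET}_LAPIC register state. */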
+static uint8_t lapic_tpr_get(struct kvm_lapic_state *xapic)
+{
+       return GET_APIC_PRI(*((u32 *)&xapic->regs[APIC_TASKPRI]));
+}
+
+static void lapic_tpr_set(struct kvm_lapic_state *xapic, uint8_t val)
+{
+       u32 *taskpri = (u32 *)&xapic->regs[APIC_TASKPRI];
+
+       *taskpri = SET_APIC_PRI(*taskpri, val);
+}
+
+static uint8_t sregs_tpr(struct kvm_sregs *sregs)
+{
+       return sregs->cr8 & GENMASK(3, 0);
+}
+
+static void test_tpr_check_tpr_zero(struct kvm_vcpu *vcpu)
+{
+       struct kvm_lapic_state xapic;
+
+       vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
+
+       TEST_ASSERT_EQ(lapic_tpr_get(&xapic), 0);
+}
+
+static void test_tpr_check_tpr_cr8_equal(struct kvm_vcpu *vcpu)
+{
+       struct kvm_sregs sregs;
+       struct kvm_lapic_state xapic;
+
+       vcpu_sregs_get(vcpu, &sregs);
+       vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
+
+       TEST_ASSERT_EQ(sregs_tpr(&sregs), lapic_tpr_get(&xapic));
+}
+
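+/*
+ * Program the borderline TPR: the vector's own priority class masks the
+ * IRQ, one class below it unmasks it.
+ */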
+static void test_tpr_set_tpr_for_irq(struct kvm_vcpu *vcpu, bool mask)
+{
+       struct kvm_lapic_state xapic;
+       uint8_t tpr;
+
+       static_assert(IRQ_VECTOR >= 16, "invalid IRQ vector number");
+       tpr = IRQ_VECTOR / 16;
+       if (!mask)
+               tpr--;
+
+       vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
+       lapic_tpr_set(&xapic, tpr);
+       vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);
+}
+
+static void test_tpr(bool __is_x2apic)
+{
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+       bool done = false;
+
+       is_x2apic = __is_x2apic;
+
+       vm = vm_create_with_one_vcpu(&vcpu, tpr_guest_code);
+       if (is_x2apic) {
+               vm_install_exception_handler(vm, IRQ_VECTOR,
+                                            tpr_guest_irq_handler_x2apic);
+       } else {
+               vm_install_exception_handler(vm, IRQ_VECTOR,
+                                            tpr_guest_irq_handler_xapic);
+               vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_X2APIC);
+               virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
+       }
+
+       sync_global_to_guest(vcpu->vm, is_x2apic);
+
+       /* According to the SDM/APM the TPR value at reset is 0 */
+       test_tpr_check_tpr_zero(vcpu);
+       test_tpr_check_tpr_cr8_equal(vcpu);
+
+       tpr_guest_irq_sync_flag_reset();
+       sync_global_to_guest(vcpu->vm, tpr_guest_irq_sync_val);
+
+       while (!done) {
+               struct ucall uc;
+
+               alarm(2);
+               vcpu_run(vcpu);
+               alarm(0);
+
+               switch (get_ucall(vcpu, &uc)) {
+               case UCALL_ABORT:
+                       REPORT_GUEST_ASSERT(uc);
+                       break;
+               case UCALL_DONE:
+                       test_tpr_check_tpr_cr8_equal(vcpu);
+                       done = true;
+                       break;
+               case UCALL_SYNC:
+                       test_tpr_check_tpr_cr8_equal(vcpu);
+                       test_tpr_set_tpr_for_irq(vcpu, uc.args[1]);
+                       break;
+               default:
+                       TEST_FAIL("Unknown ucall result 0x%lx", uc.cmd);
+                       break;
+               }
+       }
+       kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+       /*
+        * Use separate VMs for the xAPIC and x2APIC tests so that x2APIC can
+        * be fully hidden from the guest.  KVM disallows changing CPUID after
+        * KVM_RUN and AVIC is disabled if _any_ vCPU is allowed to use x2APIC.
+        */
+       test_tpr(false);
+       test_tpr(true);
+}