git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: selftests: Add helpers for locally (un)blocking IRQs on x86
author: Sean Christopherson <seanjc@google.com>
Fri, 20 Dec 2024 01:26:17 +0000 (17:26 -0800)
committer: Sean Christopherson <seanjc@google.com>
Wed, 8 Jan 2025 20:57:03 +0000 (12:57 -0800)
Copy KVM-Unit-Tests' x86 helpers for emitting STI and CLI, comments and
all, and use them throughout x86 selftests.  The safe_halt() and sti_nop()
logic in particular benefits from centralized comments, as the behavior
isn't obvious unless the reader is already aware of the STI shadow.

Cc: Manali Shukla <Manali.Shukla@amd.com>
Link: https://lore.kernel.org/r/20241220012617.3513898-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/include/x86/processor.h
tools/testing/selftests/kvm/x86/hyperv_ipi.c
tools/testing/selftests/kvm/x86/svm_int_ctl_test.c
tools/testing/selftests/kvm/x86/ucna_injection_test.c
tools/testing/selftests/kvm/x86/xapic_ipi_test.c
tools/testing/selftests/kvm/x86/xapic_state_test.c
tools/testing/selftests/kvm/x86/xen_shinfo_test.c

index 8de7cace1fbff780fe925a3fd75081ced232f371..b89e1a6444f43f0f4d1e66233eeab4321f73431c 100644 (file)
@@ -1335,6 +1335,46 @@ static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
        GUEST_ASSERT(!ret);
 }
 
+/*
+ * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
+ * intended to be a wake event arrives *after* HLT is executed.  Modern CPUs,
+ * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
+ * instruction after STI, *if* RFLAGS.IF=0 before STI.  Note, Intel CPUs may
+ * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
+ */
+static inline void safe_halt(void)
+{
+       asm volatile("sti; hlt");
+}
+
+/*
+ * Enable interrupts and ensure that interrupts are evaluated upon return from
+ * this function, i.e. execute a nop to consume the STi interrupt shadow.
+ */
+static inline void sti_nop(void)
+{
+       asm volatile ("sti; nop");
+}
+
+/*
+ * Enable interrupts for one instruction (nop), to allow the CPU to process all
+ * interrupts that are already pending.
+ */
+static inline void sti_nop_cli(void)
+{
+       asm volatile ("sti; nop; cli");
+}
+
+static inline void sti(void)
+{
+       asm volatile("sti");
+}
+
+static inline void cli(void)
+{
+       asm volatile ("cli");
+}
+
 void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
 
 #define vm_xsave_require_permission(xfeature)  \
index 22c0c124582fd89785bc8acb66d4fc181e8c2852..2b5b4bc6ef7ecb8c51876122ad287c8c134a36f0 100644 (file)
@@ -63,8 +63,10 @@ static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
        /* Signal sender vCPU we're ready */
        ipis_rcvd[vcpu_id] = (u64)-1;
 
-       for (;;)
-               asm volatile("sti; hlt; cli");
+       for (;;) {
+               safe_halt();
+               cli();
+       }
 }
 
 static void guest_ipi_handler(struct ex_regs *regs)
index 916e04248fbbdd13c10a109fa89e515b9692d90f..917b6066cfc123e856dee9f191d72029ee59958c 100644 (file)
@@ -42,10 +42,7 @@ static void l2_guest_code(struct svm_test_data *svm)
        x2apic_write_reg(APIC_ICR,
                APIC_DEST_SELF | APIC_INT_ASSERT | INTR_IRQ_NUMBER);
 
-       __asm__ __volatile__(
-               "sti\n"
-               "nop\n"
-       );
+       sti_nop();
 
        GUEST_ASSERT(vintr_irq_called);
        GUEST_ASSERT(intr_irq_called);
index 57f157c06b393c98242c313600cc183b4f1fecae..1e5e564523b34550d057d7619bb4894975181898 100644 (file)
@@ -86,7 +86,7 @@ static void ucna_injection_guest_code(void)
        wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN);
 
        /* Enables interrupt in guest. */
-       asm volatile("sti");
+       sti();
 
        /* Let user space inject the first UCNA */
        GUEST_SYNC(SYNC_FIRST_UCNA);
index a76078a08ff82f527e5b62d4e96234647c957a38..6228c0806e8919000c24929cbd072e8473d4cf80 100644 (file)
@@ -106,7 +106,8 @@ static void halter_guest_code(struct test_data_page *data)
                data->halter_tpr = xapic_read_reg(APIC_TASKPRI);
                data->halter_ppr = xapic_read_reg(APIC_PROCPRI);
                data->hlt_count++;
-               asm volatile("sti; hlt; cli");
+               safe_halt();
+               cli();
                data->wake_count++;
        }
 }
index 88bcca188799bb70e6c2493f5b92c1e7ac5635a5..fdebff1165c7804a012a7cb02b22974c743c6268 100644 (file)
@@ -18,7 +18,7 @@ struct xapic_vcpu {
 
 static void xapic_guest_code(void)
 {
-       asm volatile("cli");
+       cli();
 
        xapic_enable();
 
@@ -38,7 +38,7 @@ static void xapic_guest_code(void)
 
 static void x2apic_guest_code(void)
 {
-       asm volatile("cli");
+       cli();
 
        x2apic_enable();
 
index a59b3c799bb27383800b966472b1498fa3e186fd..287829f850f73e84cc611495598d4f68f6a662a3 100644 (file)
@@ -191,10 +191,7 @@ static void guest_code(void)
        struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
        int i;
 
-       __asm__ __volatile__(
-               "sti\n"
-               "nop\n"
-       );
+       sti_nop();
 
        /* Trigger an interrupt injection */
        GUEST_SYNC(TEST_INJECT_VECTOR);