git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: x86: Plumb "force_immediate_exit" into kvm_entry() tracepoint
author: Sean Christopherson <seanjc@google.com>
Fri, 15 Aug 2025 00:11:53 +0000 (17:11 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 28 Aug 2025 14:25:50 +0000 (16:25 +0200)
[ Upstream commit 9c9025ea003a03f967affd690f39b4ef3452c0f5 ]

Annotate the kvm_entry() tracepoint with "immediate exit" when KVM is
forcing a VM-Exit immediately after VM-Enter, e.g. when KVM wants to
inject an event but needs to first complete some other operation.
Knowing that KVM is (or isn't) forcing an exit is useful information when
debugging issues related to event injection.

Suggested-by: Maxim Levitsky <mlevitsk@redhat.com>
Link: https://lore.kernel.org/r/20240110012705.506918-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index 555c7bf35e28178ef8f428cba46b5738b87cb3ff..93f5237628546e2e44668f669870082880cb2a0b 100644 (file)
@@ -1528,7 +1528,8 @@ struct kvm_x86_ops {
        void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);
 
        int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
-       enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu);
+       enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
+                                                 bool force_immediate_exit);
        int (*handle_exit)(struct kvm_vcpu *vcpu,
                enum exit_fastpath_completion exit_fastpath);
        int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
index 2c0f9c7d1242da67ad4bfe831b02e99638bcbf96..b4283c2358a6a569508d7f80bcdea1ebcc3a4b8a 100644 (file)
@@ -4005,12 +4005,13 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
        guest_state_exit_irqoff();
 }
 
-static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+                                         bool force_immediate_exit)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
 
-       trace_kvm_entry(vcpu);
+       trace_kvm_entry(vcpu, force_immediate_exit);
 
        svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
        svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
index 6c1dcf44c4fa3e51a4e1508692189ba3b28e19db..ab407bc00d84b01aded85058dd0b85b63ca090b6 100644 (file)
  * Tracepoint for guest mode entry.
  */
 TRACE_EVENT(kvm_entry,
-       TP_PROTO(struct kvm_vcpu *vcpu),
-       TP_ARGS(vcpu),
+       TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit),
+       TP_ARGS(vcpu, force_immediate_exit),
 
        TP_STRUCT__entry(
                __field(        unsigned int,   vcpu_id         )
                __field(        unsigned long,  rip             )
+               __field(        bool,           immediate_exit  )
        ),
 
        TP_fast_assign(
                __entry->vcpu_id        = vcpu->vcpu_id;
                __entry->rip            = kvm_rip_read(vcpu);
+               __entry->immediate_exit = force_immediate_exit;
        ),
 
-       TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip)
+       TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip,
+                 __entry->immediate_exit ? "[immediate exit]" : "")
 );
 
 /*
index 390af16d9a67d90885893b7aa12314e0b3149d77..0b495979a02bc3d9c61e4a122fc44bd9e0ab1ffb 100644 (file)
@@ -7171,7 +7171,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
        guest_state_exit_irqoff();
 }
 
-static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long cr3, cr4;
@@ -7198,7 +7198,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                return EXIT_FASTPATH_NONE;
        }
 
-       trace_kvm_entry(vcpu);
+       trace_kvm_entry(vcpu, force_immediate_exit);
 
        if (vmx->ple_window_dirty) {
                vmx->ple_window_dirty = false;
index d224180c56f59f3516ef95b03f245c6ca43915ef..08c3da88f402bcf78a2318fe7fae2cf1b8302fe3 100644 (file)
@@ -10856,7 +10856,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
                             (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
 
-               exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
+               exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu, req_immediate_exit);
                if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                        break;