KVM: x86: Plumb "force_immediate_exit" into kvm_entry() tracepoint
author Sean Christopherson <seanjc@google.com>
Fri, 15 Aug 2025 00:25:28 +0000 (17:25 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 28 Aug 2025 14:28:13 +0000 (16:28 +0200)
[ Upstream commit 9c9025ea003a03f967affd690f39b4ef3452c0f5 ]

Annotate the kvm_entry() tracepoint with "immediate exit" when KVM is
forcing a VM-Exit immediately after VM-Enter, e.g. when KVM wants to
inject an event but needs to first complete some other operation.
Knowing that KVM is (or isn't) forcing an exit is useful information when
debugging issues related to event injection.

Suggested-by: Maxim Levitsky <mlevitsk@redhat.com>
Link: https://lore.kernel.org/r/20240110012705.506918-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
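
With this change, the kvm_entry tracepoint output gains an optional suffix. As an illustration only (the rip value below is made up), an annotated ftrace line produced by the new TP_printk would read:

  kvm_entry: vcpu 0, rip 0xffffffff81234567[immediate exit]

while an entry with no forced exit pending keeps the existing "vcpu 0, rip 0xffffffff81234567" form.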

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b5210505abfa5abeee62fc7ac6d664ae0ccacde1..5703600a454e28652ba7328313f8b9ea621b5338 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1624,7 +1624,8 @@ struct kvm_x86_ops {
        void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);
 
        int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
-       enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu);
+       enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
+                                                 bool force_immediate_exit);
        int (*handle_exit)(struct kvm_vcpu *vcpu,
                enum exit_fastpath_completion exit_fastpath);
        int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index abbb84ddfe023c966ec6eca1bc4703fbff34ebd3..5a230be224d1cd6c4129b764160e7ee723ac0298 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4194,12 +4194,13 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
        guest_state_exit_irqoff();
 }
 
-static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+                                         bool force_immediate_exit)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
 
-       trace_kvm_entry(vcpu);
+       trace_kvm_entry(vcpu, force_immediate_exit);
 
        svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
        svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index b82e6ed4f024174658114321d85a60529a3df119..c6b4b1728006d5021f958516c15b653d7759f4d1 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
  * Tracepoint for guest mode entry.
  */
 TRACE_EVENT(kvm_entry,
-       TP_PROTO(struct kvm_vcpu *vcpu),
-       TP_ARGS(vcpu),
+       TP_PROTO(struct kvm_vcpu *vcpu, bool force_immediate_exit),
+       TP_ARGS(vcpu, force_immediate_exit),
 
        TP_STRUCT__entry(
                __field(        unsigned int,   vcpu_id         )
                __field(        unsigned long,  rip             )
+               __field(        bool,           immediate_exit  )
        ),
 
        TP_fast_assign(
                __entry->vcpu_id        = vcpu->vcpu_id;
                __entry->rip            = kvm_rip_read(vcpu);
+               __entry->immediate_exit = force_immediate_exit;
        ),
 
-       TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip)
+       TP_printk("vcpu %u, rip 0x%lx%s", __entry->vcpu_id, __entry->rip,
+                 __entry->immediate_exit ? "[immediate exit]" : "")
 );
 
 /*
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 9ba4baf2a9e9ab76138784eb11d03c0f3ab297aa..ee501871ddb031a58f88f4d5b7c4d23ed297a207 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7312,7 +7312,7 @@ out:
        guest_state_exit_irqoff();
 }
 
-static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long cr3, cr4;
@@ -7339,7 +7339,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                return EXIT_FASTPATH_NONE;
        }
 
-       trace_kvm_entry(vcpu);
+       trace_kvm_entry(vcpu, force_immediate_exit);
 
        if (vmx->ple_window_dirty) {
                vmx->ple_window_dirty = false;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9944b32b0b308a59cbe2d044f03cfbe9d23391fe..d04066099567e40a41fc4a70da2ebcc4d0bb169e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10795,7 +10795,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
                             (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
 
-               exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
+               exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu, req_immediate_exit);
                if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                        break;
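
For readers adapting code to the updated kvm_x86_ops interface, a minimal sketch of the new hook shape follows; the function name and body are hypothetical, and only the signature, the trace_kvm_entry() call, and the EXIT_FASTPATH_NONE return mirror what this patch establishes:

/* Hypothetical sketch: every kvm_x86_ops.vcpu_run implementation now takes
 * the extra flag, even if it only forwards it to the tracepoint. */
static fastpath_t example_vcpu_run(struct kvm_vcpu *vcpu,
				   bool force_immediate_exit)
{
	/* Report whether an immediate exit was requested for this entry. */
	trace_kvm_entry(vcpu, force_immediate_exit);

	/* ... vendor-specific VM-Enter and VM-Exit handling ... */

	return EXIT_FASTPATH_NONE;
}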