git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: x86: Acquire SRCU in WRMSR fastpath iff instruction needs to be skipped
Author: Sean Christopherson <seanjc@google.com>
Tue, 5 Aug 2025 19:05:14 +0000 (12:05 -0700)
Committer: Sean Christopherson <seanjc@google.com>
Tue, 19 Aug 2025 18:59:34 +0000 (11:59 -0700)
Acquire SRCU in the WRMSR fastpath if and only if an instruction needs to
be skipped, i.e. only if the fastpath succeeds.  The reasoning in commit
3f2739bd1e0b ("KVM: x86: Acquire SRCU read lock when handling fastpath MSR
writes") about "avoid having to play whack-a-mole" seems sound, but in
hindsight unconditionally acquiring SRCU does more harm than good.

While acquiring/releasing SRCU isn't slow per se, the things that are
_protected_ by kvm->srcu are generally safe to access only in the "slow"
VM-Exit path.  E.g. accessing memslots in generic helpers is never safe,
because accessing guest memory with IRQs disabled is unless unsafe (except
when kvm_vcpu_read_guest_atomic() is used, but that API should never be
used in emulation helpers).

In other words, playing whack-a-mole is actually desirable in this case,
because every access to an asset protected by kvm->srcu warrants further
scrutiny.

Link: https://lore.kernel.org/r/20250805190526.1453366-7-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/x86.c

index 366c8c7f2e43c8b692ecbf73d6e2df21f0e984ec..a5d7ab23d43210d97a4ce95eb4eadb4f1ed41d40 100644 (file)
@@ -2159,10 +2159,8 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 {
        u32 msr = kvm_rcx_read(vcpu);
        u64 data;
-       fastpath_t ret;
        bool handled;
-
-       kvm_vcpu_srcu_read_lock(vcpu);
+       int r;
 
        switch (msr) {
        case APIC_BASE_MSR + (APIC_ICR >> 4):
@@ -2178,19 +2176,16 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
                break;
        }
 
-       if (handled) {
-               if (!kvm_skip_emulated_instruction(vcpu))
-                       ret = EXIT_FASTPATH_EXIT_USERSPACE;
-               else
-                       ret = EXIT_FASTPATH_REENTER_GUEST;
-               trace_kvm_msr_write(msr, data);
-       } else {
-               ret = EXIT_FASTPATH_NONE;
-       }
+       if (!handled)
+               return EXIT_FASTPATH_NONE;
 
+       kvm_vcpu_srcu_read_lock(vcpu);
+       r = kvm_skip_emulated_instruction(vcpu);
        kvm_vcpu_srcu_read_unlock(vcpu);
 
-       return ret;
+       trace_kvm_msr_write(msr, data);
+
+       return r ? EXIT_FASTPATH_REENTER_GUEST : EXIT_FASTPATH_EXIT_USERSPACE;
 }
 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);