KVM: x86: do not set st->preempted when going back to user space
author     Paolo Bonzini <pbonzini@redhat.com>
Tue, 7 Jun 2022 14:07:11 +0000 (10:07 -0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Aug 2022 11:20:40 +0000 (13:20 +0200)
[ Upstream commit 54aa83c90198e68eee8b0850c749bc70efb548da ]

Similar to the Xen path, only change the vCPU's reported state if the vCPU
was actually preempted.  The reason for KVM's previous behavior (marking the
vCPU preempted even on a plain return to user space) is that, for example,
optimistic spinning might not be a good idea if the guest is doing repeated
exits to userspace; however, that behavior is confusing and unlikely to make
a difference, because well-tuned guests will hardly ever exit KVM_RUN in the
first place.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
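
For context (illustration only, not part of the patch): the bit this change
stops publishing on plain returns to user space is the hint that guest-side
lock waiters consult before continuing to spin on a lock whose holder runs on
another vCPU.  Below is a minimal user-space sketch of that consumer, loosely
modeled on the guest's vcpu_is_preempted() paravirt hook; the steal_hint array
and vcpu_was_preempted() name are made up for the example, while
KVM_VCPU_PREEMPTED mirrors the flag KVM sets in st->preempted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_VCPU_PREEMPTED (1 << 0)   /* flag the host writes into st->preempted */

/* Illustrative stand-in for the preempted byte of struct kvm_steal_time. */
struct steal_hint {
        uint8_t preempted;
};

static struct steal_hint steal_hint[4];       /* pretend we have 4 vCPUs */

/* A lock waiter asks this before deciding whether to keep spinning. */
static bool vcpu_was_preempted(int cpu)
{
        return steal_hint[cpu].preempted & KVM_VCPU_PREEMPTED;
}

int main(void)
{
        steal_hint[1].preempted = KVM_VCPU_PREEMPTED;   /* host preempted vCPU 1 */

        for (int cpu = 0; cpu < 4; cpu++)
                printf("vCPU %d preempted: %s\n", cpu,
                       vcpu_was_preempted(cpu) ? "yes" : "no");
        return 0;
}

With the old behaviour, a vCPU that merely returned from KVM_RUN to its VMM
would also look "preempted" here, needlessly discouraging waiters from
spinning.
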
arch/x86/kvm/x86.c
arch/x86/kvm/xen.h

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index df74ec51c7f3b935c4a59ddafb70e0b224b16df9..91d887fd10ab39b9b18620134a199c4d12358176 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4651,19 +4651,21 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
        int idx;
 
-       if (vcpu->preempted && !vcpu->arch.guest_state_protected)
-               vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
+       if (vcpu->preempted) {
+               if (!vcpu->arch.guest_state_protected)
+                       vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
-       /*
-        * Take the srcu lock as memslots will be accessed to check the gfn
-        * cache generation against the memslots generation.
-        */
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-       if (kvm_xen_msr_enabled(vcpu->kvm))
-               kvm_xen_runstate_set_preempted(vcpu);
-       else
-               kvm_steal_time_set_preempted(vcpu);
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+               /*
+                * Take the srcu lock as memslots will be accessed to check the gfn
+                * cache generation against the memslots generation.
+                */
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
+               if (kvm_xen_msr_enabled(vcpu->kvm))
+                       kvm_xen_runstate_set_preempted(vcpu);
+               else
+                       kvm_steal_time_set_preempted(vcpu);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
+       }
 
        static_call(kvm_x86_vcpu_put)(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
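
To make the restructuring above concrete, here is a toy user-space model
(names are illustrative, none of them kernel APIs) of the two paths that end
up in kvm_arch_vcpu_put(): an involuntary preemption by the host scheduler,
which sets vcpu->preempted beforehand, and a voluntary exit from KVM_RUN back
to user space, which does not.  After this patch only the first path publishes
the hint to the guest.

#include <stdbool.h>
#include <stdio.h>

struct vcpu {
        bool preempted;               /* set by the scheduler's sched-out hook */
        bool hint_published;          /* stand-in for st->preempted / Xen runstate */
};

static void arch_vcpu_put(struct vcpu *v)
{
        if (v->preempted)             /* new behaviour: gate on real preemption */
                v->hint_published = true;
}

static void sched_out(struct vcpu *v)            /* host scheduler preempts the vCPU thread */
{
        v->preempted = true;
        arch_vcpu_put(v);
}

static void return_to_user_space(struct vcpu *v) /* KVM_RUN returns to the VMM */
{
        arch_vcpu_put(v);             /* vcpu->preempted stays false */
}

int main(void)
{
        struct vcpu a = { 0 }, b = { 0 };

        sched_out(&a);
        return_to_user_space(&b);

        printf("preempted vCPU publishes hint:  %d\n", a.hint_published);  /* 1 */
        printf("user-space exit publishes hint: %d\n", b.hint_published);  /* 0 */
        return 0;
}
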
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index adbcc9ed59dbc0184ed113b2e557107765109b6f..fda1413f8af953fc6375d89c83cf11fdae3ea8d0 100644
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -103,8 +103,10 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
         * behalf of the vCPU. Only if the VMM does actually block
         * does it need to enter RUNSTATE_blocked.
         */
-       if (vcpu->preempted)
-               kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
+       if (WARN_ON_ONCE(!vcpu->preempted))
+               return;
+
+       kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
 }
 
 /* 32-bit compatibility definitions, also used natively in 32-bit build */
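
The xen.h half turns the helper's own vcpu->preempted test into an assertion,
since its only caller, kvm_arch_vcpu_put() above, now performs that check
itself.  A tiny standalone illustration of the resulting contract, with a
user-space stand-in for WARN_ON_ONCE() (the macro below is a simplified
imitation of the kernel's, using a GNU C statement expression):

#include <stdbool.h>
#include <stdio.h>

/* Simplified imitation of WARN_ON_ONCE(): complain once, return the condition. */
#define WARN_ON_ONCE(cond) ({                              \
        static bool warned;                                \
        bool c = (cond);                                   \
        if (c && !warned) {                                \
                warned = true;                             \
                fprintf(stderr, "WARN: %s\n", #cond);      \
        }                                                  \
        c;                                                 \
})

struct vcpu { bool preempted; };

/* The callee now asserts its precondition instead of silently filtering. */
static void runstate_set_preempted(struct vcpu *v)
{
        if (WARN_ON_ONCE(!v->preempted))
                return;

        printf("runstate -> runnable\n");
}

int main(void)
{
        struct vcpu v = { .preempted = true };
        runstate_set_preempted(&v);   /* caller checked: updates the runstate */

        v.preempted = false;
        runstate_set_preempted(&v);   /* precondition violated: warns once, bails out */
        return 0;
}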