KVM: x86: move steal time initialization to vcpu entry time
author     Marcelo Tosatti <mtosatti@redhat.com>
           Wed, 14 Oct 2015 22:33:09 +0000 (19:33 -0300)
committer  Zefan Li <lizefan@huawei.com>
           Mon, 21 Mar 2016 01:17:59 +0000 (09:17 +0800)
commit 7cae2bedcbd4680b155999655e49c27b9cf020fa upstream.

As reported at https://bugs.launchpad.net/qemu/+bug/1494350,
it is possible for vcpu->arch.st.last_steal to be initialized
from a thread other than the vcpu thread, say the iothread, via
KVM_SET_MSRS.

This can cause an overflow later (when subtracting from the vcpu
thread's sched_info.run_delay).

To avoid that, move steal time accumulation to vcpu entry time,
before copying the steal time data to the guest.
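
For reference, accumulate_steal_time() derives the steal delta by
subtracting vcpu->arch.st.last_steal from the calling task's
sched_info.run_delay. A simplified sketch of that helper as it exists
in this kernel series (shown for illustration, not verbatim):

static void accumulate_steal_time(struct kvm_vcpu *vcpu)
{
        u64 delta;

        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;

        /*
         * run_delay is per-task: if last_steal was captured on the
         * iothread (via KVM_SET_MSRS), this subtraction, evaluated
         * later on the vcpu thread, can wrap.
         */
        delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
        vcpu->arch.st.last_steal = current->sched_info.run_delay;
        vcpu->arch.st.accum_steal = delta;
}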

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Zefan Li <lizefan@huawei.com>
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4ad2b7bb382e2b9166816ff92c9ec7da29491256..9cc83e287adf89cac7f447f811ef5a22b6da11f2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1545,6 +1545,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
+       accumulate_steal_time(vcpu);
+
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
 
@@ -1665,12 +1667,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (!(data & KVM_MSR_ENABLED))
                        break;
 
-               vcpu->arch.st.last_steal = current->sched_info.run_delay;
-
-               preempt_disable();
-               accumulate_steal_time(vcpu);
-               preempt_enable();
-
                kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
                break;
@@ -2327,7 +2323,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                vcpu->cpu = cpu;
        }
 
-       accumulate_steal_time(vcpu);
        kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 }
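
After this change, accumulate_steal_time() is reached only through
record_steal_time(), which the vcpu thread runs while handling
KVM_REQ_STEAL_UPDATE on guest entry, roughly as follows (a sketch of
the existing vcpu_enter_guest() request handling, not part of this
diff):

        if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
                record_steal_time(vcpu);

Since the request is always processed on the vcpu thread,
current->sched_info.run_delay and vcpu->arch.st.last_steal now refer
to the same task, and the subtraction can no longer wrap.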