Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 13728dbdbb5604ef5ce942e680ac1ac51c73d0c6..a60bdbccff5189b5a98b9a7fcc6a3b9f7ff5eeec 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1441,20 +1441,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
 static cycle_t read_tsc(void)
 {
-       cycle_t ret;
-       u64 last;
-
-       /*
-        * Empirically, a fence (of type that depends on the CPU)
-        * before rdtsc is enough to ensure that rdtsc is ordered
-        * with respect to loads.  The various CPU manuals are unclear
-        * as to whether rdtsc can be reordered with later loads,
-        * but no one has ever seen it happen.
-        */
-       rdtsc_barrier();
-       ret = (cycle_t)vget_cycles();
-
-       last = pvclock_gtod_data.clock.cycle_last;
+       cycle_t ret = (cycle_t)rdtsc_ordered();
+       u64 last = pvclock_gtod_data.clock.cycle_last;
 
        if (likely(ret >= last))
                return ret;
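
The open-coded rdtsc_barrier()/vget_cycles() pair deleted above is folded into rdtsc_ordered(), which bundles the "fence, then RDTSC" pattern the removed comment describes. A minimal userspace sketch of that pattern, assuming LFENCE as the fence (the kernel actually selects the fence per CPU via alternatives), might look like this:

#include <stdint.h>

/* Sketch only: illustrates the fence-before-RDTSC idea behind
 * rdtsc_ordered(); not the kernel's implementation. */
static inline uint64_t rdtsc_ordered_sketch(void)
{
	uint32_t lo, hi;

	/* Keep the TSC read from being reordered with earlier loads. */
	asm volatile("lfence" ::: "memory");
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}
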
@@ -1643,7 +1631,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                return 1;
        }
        if (!use_master_clock) {
-               host_tsc = native_read_tsc();
+               host_tsc = rdtsc();
                kernel_ns = get_kernel_ns();
        }
 
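
native_read_tsc() is renamed to rdtsc() throughout this file; the read itself is unchanged and, unlike rdtsc_ordered(), carries no fence. A simplified standalone sketch of the unordered read, assuming GCC-style inline asm:

#include <stdint.h>

/* Plain, unordered TSC read: nothing prevents the CPU from reordering
 * it with surrounding loads, which is why the clocksource path above
 * uses rdtsc_ordered() instead. */
static inline uint64_t rdtsc_sketch(void)
{
	uint32_t lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}
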
@@ -1985,7 +1973,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (guest_cpuid_has_tsc_adjust(vcpu)) {
                        if (!msr_info->host_initiated) {
                                s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
-                               kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+                               adjust_tsc_offset_guest(vcpu, adj);
                        }
                        vcpu->arch.ia32_tsc_adjust_msr = data;
                }
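
The direct kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true) call is replaced by adjust_tsc_offset_guest(). Judging from this hunk alone, the helper hides the boolean "guest" argument; a hypothetical wrapper of that shape (the real helper lives elsewhere in x86.c and may do more) would be:

/* Hypothetical fragment implied by this hunk; the name matches the
 * call site, but the body is an assumption, not kernel code. */
static void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}
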
@@ -2620,7 +2608,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
                s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
-                               native_read_tsc() - vcpu->arch.last_host_tsc;
+                               rdtsc() - vcpu->arch.last_host_tsc;
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
                if (check_tsc_unstable()) {
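
The difference between the fresh rdtsc() sample and last_host_tsc lands in a signed 64-bit variable, so a TSC that reads lower on the new CPU shows up as a negative delta and marks the TSC unstable. A small standalone illustration with made-up sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical samples: the new CPU's counter reads lower than
	 * the value recorded when the vCPU last left its old CPU. */
	uint64_t last_host_tsc = 1000000;
	uint64_t now = 999000;
	int64_t tsc_delta = (int64_t)(now - last_host_tsc);

	if (tsc_delta < 0)
		printf("backwards TSC: delta = %lld\n", (long long)tsc_delta);
	return 0;
}
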
@@ -2648,7 +2636,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
-       vcpu->arch.last_host_tsc = native_read_tsc();
+       vcpu->arch.last_host_tsc = rdtsc();
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -6083,6 +6071,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 static void process_smi(struct kvm_vcpu *vcpu)
 {
        struct kvm_segment cs, ds;
+       struct desc_ptr dt;
        char buf[512];
        u32 cr0;
 
@@ -6115,6 +6104,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
 
        kvm_x86_ops->set_cr4(vcpu, 0);
 
+       /* Undocumented: IDT limit is set to zero on entry to SMM.  */
+       dt.address = dt.size = 0;
+       kvm_x86_ops->set_idt(vcpu, &dt);
+
        __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
 
        cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
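
Setting dt.address = dt.size = 0 hands the guest a zero-limit IDT on SMM entry, so any vectored event taken before the SMM handler reloads the IDT typically escalates to a triple fault; that is the undocumented hardware behaviour the new comment refers to. For reference, struct desc_ptr in the x86 headers of this era is just a limit plus a base (paraphrased):

/* See arch/x86/include/asm/desc_defs.h. */
struct desc_ptr {
	unsigned short size;	/* table limit */
	unsigned long  address;	/* table base  */
} __attribute__((packed));
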
@@ -6384,7 +6377,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                hw_breakpoint_restore();
 
        vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-                                                          native_read_tsc());
+                                                          rdtsc());
 
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
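
The host TSC sample from rdtsc() is passed through kvm_x86_ops->read_l1_tsc() so that last_guest_tsc is recorded in the L1 guest's time domain rather than the host's. Conceptually (an assumption about the vendor callbacks, ignoring nested guests and TSC scaling), that conversion is the host value plus the offset programmed for L1:

#include <stdint.h>

/* Conceptual sketch, not the VMX/SVM implementation: translate a raw
 * host TSC sample into what the L1 guest would read at that moment. */
static inline uint64_t read_l1_tsc_sketch(uint64_t host_tsc,
					  uint64_t l1_tsc_offset)
{
	return host_tsc + l1_tsc_offset;
}
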
@@ -7193,7 +7186,7 @@ int kvm_arch_hardware_enable(void)
        if (ret != 0)
                return ret;
 
-       local_tsc = native_read_tsc();
+       local_tsc = rdtsc();
        stable = !check_tsc_unstable();
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {