]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: x86: Push down setting vcpu.arch.user_set_tsc
authorIsaku Yamahata <isaku.yamahata@intel.com>
Sat, 12 Oct 2024 07:55:55 +0000 (00:55 -0700)
committerPaolo Bonzini <pbonzini@redhat.com>
Fri, 14 Mar 2025 17:55:32 +0000 (13:55 -0400)
Push down setting vcpu.arch.user_set_tsc to true from kvm_synchronize_tsc()
to __kvm_synchronize_tsc() so that the two callers don't have to modify
user_set_tsc directly as preparation.

A later change will prohibit TSC synchronization changes for TDX guests by
modifying __kvm_synchronize_tsc(); pushing the user_set_tsc update down now
means that later change won't need to touch the caller sites.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Message-ID: <62b1a7a35d6961844786b6e47e8ecb774af7a228.1728719037.git.isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/x86.c

index bd2b71d4e64f0876cfb616c35a231f10d325ca9d..2da75bbf7f943b8fbe9e2fdff8f2639fe2d46ddb 100644 (file)
@@ -2626,12 +2626,15 @@ static inline bool kvm_check_tsc_unstable(void)
  * participates in.
  */
 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
-                                 u64 ns, bool matched)
+                                 u64 ns, bool matched, bool user_set_tsc)
 {
        struct kvm *kvm = vcpu->kvm;
 
        lockdep_assert_held(&kvm->arch.tsc_write_lock);
 
+       if (user_set_tsc)
+               vcpu->kvm->arch.user_set_tsc = true;
+
        /*
         * We also track th most recent recorded KHZ, write and time to
         * allow the matching interval to be extended at each write.
@@ -2717,8 +2720,6 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
                }
        }
 
-       if (user_value)
-               kvm->arch.user_set_tsc = true;
 
        /*
         * For a reliable TSC, we can match TSC offsets, and for an unstable
@@ -2738,7 +2739,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
                matched = true;
        }
 
-       __kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
+       __kvm_synchronize_tsc(vcpu, offset, data, ns, matched, !!user_value);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 }
 
@@ -5725,8 +5726,7 @@ static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
                tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
                ns = get_kvmclock_base_ns();
 
-               kvm->arch.user_set_tsc = true;
-               __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);
+               __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched, true);
                raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
                r = 0;