KVM: TDX: Explicitly set user-return MSRs that *may* be clobbered by the TDX-Module
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Thu, 30 Oct 2025 19:15:25 +0000 (12:15 -0700)
Commit:     Sean Christopherson <seanjc@google.com>
CommitDate: Fri, 7 Nov 2025 18:59:45 +0000 (10:59 -0800)
Set all user-return MSRs to their post-TD-exit value when preparing to run
a TDX vCPU to ensure the value that KVM expects to be loaded after running
the vCPU is indeed the value that's loaded in hardware.  If the TDX-Module
doesn't actually enter the guest, i.e. doesn't do VM-Enter, then it won't
"restore" VMM state, i.e. won't clobber user-return MSRs to their expected
post-run values, in which case simply updating KVM's "cached" value will
effectively corrupt the cache due to hardware still holding the original
value.
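
To make the hazard concrete, the sequence below sketches how a cache-only
update goes stale when the TDX-Module bails before VM-Enter (the helper
names are taken from the diff; the trace itself is illustrative, not
actual kernel code):

    /* hardware = host_val, cache = host_val */
    wrmsrl(msr, host_val);

    /*
     * TDX-Module suspects a zero-step attack and synthesizes an EPT
     * Violation without doing VM-Enter, i.e. never clobbers the MSR.
     */
    tdh_vp_enter(...);

    /* cache = defval, but hardware still holds host_val => stale cache */
    kvm_user_return_msr_update_cache(slot, defval);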

In theory, KVM could conditionally update the current user-return value if
and only if tdh_vp_enter() succeeds, but in practice "success" doesn't
guarantee the TDX-Module actually entered the guest, e.g. if the TDX-Module
synthesizes an EPT Violation because it suspects a zero-step attack.
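
For completeness, the rejected conditional approach would have looked
something like this (a sketch only; tdx_vp_enter_succeeded() is a
hypothetical predicate, not an existing helper):

    /*
     * Rejected: trust the cache-only update iff VP.ENTER "succeeds".
     * Unsound, because a successful return code doesn't prove the
     * TDX-Module actually did VM-Enter, e.g. a synthesized EPT Violation
     * for a suspected zero-step attack reports success without having
     * restored VMM state.
     */
    if (tdx_vp_enter_succeeded(vcpu))
            tdx_user_return_msr_update_cache();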

Force-load the expected values instead of trying to decipher whether or
not the TDX-Module restored/clobbered the MSRs, as the benefits don't
justify the risk.  Effectively avoiding four WRMSRs once per run loop
(even if the vCPU is scheduled out, user-return MSRs need to be reloaded
only if the CPU returns to userspace or runs a non-TDX vCPU) is likely in
the noise when amortized over all entries, given the cost of running a
TDX vCPU.
E.g. the cost of the WRMSRs is somewhere between ~300 and ~500 cycles,
whereas the cost of a _single_ roundtrip to/from a TDX guest is thousands
of cycles.
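
Note that the force-load is also cheaper than it may appear:
kvm_set_user_return_msr() already elides the WRMSR when the cached value
matches the requested one (paraphrased below from arch/x86/kvm/x86.c), so
the ~300-500 cycles are paid only when the MSRs were actually modified
since the last run, e.g. after a return to userspace restored the host
values:

    /* Paraphrased sketch of kvm_set_user_return_msr(): */
    value = (value & mask) | (msrs->values[slot].host & ~mask);
    if (value == msrs->values[slot].curr)
            return 0;               /* already loaded, skip the WRMSR */
    err = wrmsrl_safe(...);         /* otherwise update hardware */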

Fixes: e0b4f31a3c65 ("KVM: TDX: restore user ret MSRs")
Cc: stable@vger.kernel.org
Cc: Yan Zhao <yan.y.zhao@intel.com>
Cc: Xiaoyao Li <xiaoyao.li@intel.com>
Cc: Rick Edgecombe <rick.p.edgecombe@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Link: https://patch.msgid.link/20251030191528.3380553-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx/tdx.c
arch/x86/kvm/vmx/tdx.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4fbe4b7ce1dad7cc802aee4b46199599b9b38e11..a557c504c1a4258c4cfd08a9e404ed8139e32848 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2379,7 +2379,6 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 int kvm_add_user_return_msr(u32 msr);
 int kvm_find_user_return_msr(u32 msr);
 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
-void kvm_user_return_msr_update_cache(unsigned int index, u64 val);
 u64 kvm_get_user_return_msr(unsigned int slot);
 
 static inline bool kvm_is_supported_user_return_msr(u32 msr)
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 67c190ce8104ee3d6f840f953fd544c363799655..163f854a39f25e35ca051d330cff83fec845afc3 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -763,25 +763,6 @@ static bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
        return tdx_vcpu_state_details_intr_pending(vcpu_state_details);
 }
 
-/*
- * Compared to vmx_prepare_switch_to_guest(), there is not much to do
- * as SEAMCALL/SEAMRET calls take care of most of save and restore.
- */
-void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
-{
-       struct vcpu_vt *vt = to_vt(vcpu);
-
-       if (vt->guest_state_loaded)
-               return;
-
-       if (likely(is_64bit_mm(current->mm)))
-               vt->msr_host_kernel_gs_base = current->thread.gsbase;
-       else
-               vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
-
-       vt->guest_state_loaded = true;
-}
-
 struct tdx_uret_msr {
        u32 msr;
        unsigned int slot;
@@ -795,19 +776,38 @@ static struct tdx_uret_msr tdx_uret_msrs[] = {
        {.msr = MSR_TSC_AUX,},
 };
 
-static void tdx_user_return_msr_update_cache(void)
+void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_vt *vt = to_vt(vcpu);
        int i;
 
+       if (vt->guest_state_loaded)
+               return;
+
+       if (likely(is_64bit_mm(current->mm)))
+               vt->msr_host_kernel_gs_base = current->thread.gsbase;
+       else
+               vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+
+       vt->guest_state_loaded = true;
+
+       /*
+        * Explicitly set user-return MSRs that are clobbered by the TDX-Module
+        * if VP.ENTER succeeds, i.e. on TD-Exit, with the values that would be
+        * written by the TDX-Module.  Don't rely on the TDX-Module to actually
+        * clobber the MSRs, as the contract is poorly defined and not upheld.
+        * E.g. the TDX-Module will synthesize an EPT Violation without doing
+        * VM-Enter if it suspects a zero-step attack, and never "restore" VMM
+        * state.
+        */
        for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
-               kvm_user_return_msr_update_cache(tdx_uret_msrs[i].slot,
-                                                tdx_uret_msrs[i].defval);
+               kvm_set_user_return_msr(tdx_uret_msrs[i].slot,
+                                       tdx_uret_msrs[i].defval, -1ull);
 }
 
 static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vt *vt = to_vt(vcpu);
-       struct vcpu_tdx *tdx = to_tdx(vcpu);
 
        if (!vt->guest_state_loaded)
                return;
@@ -815,11 +815,6 @@ static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
        ++vcpu->stat.host_state_reload;
        wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
 
-       if (tdx->guest_entered) {
-               tdx_user_return_msr_update_cache();
-               tdx->guest_entered = false;
-       }
-
        vt->guest_state_loaded = false;
 }
 
@@ -1059,7 +1054,6 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
                update_debugctlmsr(vcpu->arch.host_debugctl);
 
        tdx_load_host_xsave_state(vcpu);
-       tdx->guest_entered = true;
 
        vcpu->arch.regs_avail &= TDX_REGS_AVAIL_SET;
 
@@ -3443,10 +3437,6 @@ static int __init __tdx_bringup(void)
                /*
                 * Check if MSRs (tdx_uret_msrs) can be saved/restored
                 * before returning to user space.
-                *
-                * this_cpu_ptr(user_return_msrs)->registered isn't checked
-                * because the registration is done at vcpu runtime by
-                * tdx_user_return_msr_update_cache().
                 */
                tdx_uret_msrs[i].slot = kvm_find_user_return_msr(tdx_uret_msrs[i].msr);
                if (tdx_uret_msrs[i].slot == -1) {
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index ca39a9391db1e33312cc9ac77b71648488cd7ba2..7f258870dc410972f5f568459fbe0ee9fcad1bca 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -67,7 +67,6 @@ struct vcpu_tdx {
        u64 vp_enter_ret;
 
        enum vcpu_tdx_state state;
-       bool guest_entered;
 
        u64 map_gpa_next;
        u64 map_gpa_end;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 877c8766c5519fed4b8c17f00b5214f07af953c2..f4ce4292eb520a6a2287be0104a22beee5251408 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -681,15 +681,6 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_user_return_msr);
 
-void kvm_user_return_msr_update_cache(unsigned int slot, u64 value)
-{
-       struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
-
-       msrs->values[slot].curr = value;
-       kvm_user_return_register_notifier(msrs);
-}
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_user_return_msr_update_cache);
-
 u64 kvm_get_user_return_msr(unsigned int slot)
 {
        return this_cpu_ptr(user_return_msrs)->values[slot].curr;