From: Sean Christopherson
Date: Fri, 23 May 2025 01:17:54 +0000 (-0700)
Subject: KVM: VMX: Apply MMIO Stale Data mitigation if KVM maps MMIO into the guest
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=83ebe715748314331f9639de2220d02debfe926d;p=thirdparty%2Flinux.git

KVM: VMX: Apply MMIO Stale Data mitigation if KVM maps MMIO into the guest

Enforce the MMIO Stale Data mitigation if KVM has ever mapped host MMIO
into the VM, not if the VM has an assigned device.  VFIO is but one of
many ways to map host MMIO into a KVM guest, and even within VFIO,
formally attaching a device to a VM via KVM_DEV_VFIO_FILE_ADD is
entirely optional.

Track whether or not the guest can access host MMIO on a per-MMU basis,
i.e. based on whether or not the vCPU has a mapping to host MMIO.  For
simplicity, track MMIO mappings in "special" roots (those without a
kvm_mmu_page) at the VM level, as only Intel CPUs are vulnerable, and so
only legacy 32-bit shadow paging is affected, i.e. lack of precise
tracking is a complete non-issue.

Make the per-MMU and per-VM flags sticky.  Detecting when *all* MMIO
mappings have been removed would be absurdly complex.  And in practice,
removing MMIO from a guest will be done by deleting the associated
memslot, which by default will force KVM to re-allocate all roots.
Special roots will forever be mitigated, but as above, the affected
scenarios are not expected to be performance sensitive.

Use a VMX_RUN flag to communicate the need for a CPU buffer flush to
vmx_vcpu_enter_exit() so that kvm_vcpu_can_access_host_mmio() and all
its dependencies don't need to be marked __always_inline, e.g. so that
KASAN doesn't trigger a noinstr violation.

Cc: Pawan Gupta
Cc: Borislav Petkov
Fixes: 8cb861e9e3c9 ("x86/speculation/mmio: Add mitigation for Processor MMIO Stale Data")
Tested-by: Pawan Gupta
Link: https://lore.kernel.org/r/20250523011756.3243624-4-seanjc@google.com
Signed-off-by: Sean Christopherson
---

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4a391929cdba..6a172c7630f3f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1451,6 +1451,7 @@ struct kvm_arch {
 	bool x2apic_format;
 	bool x2apic_broadcast_quirk_disabled;
 
+	bool has_mapped_host_mmio;
 	bool guest_can_read_msr_platform_info;
 	bool exception_payload_enabled;
 
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index db8f33e4de624..65f3c89d7c5d2 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -103,6 +103,9 @@ struct kvm_mmu_page {
 		int root_count;
 		refcount_t tdp_mmu_root_count;
 	};
+
+	bool has_mapped_host_mmio;
+
 	union {
 		/* These two members aren't used for TDP MMU */
 		struct {
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 3f16c91aa0423..df31039b5d63b 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -138,6 +138,22 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
 	return *is_host_mmio;
 }
 
+static void kvm_track_host_mmio_mapping(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+	if (root)
+		WRITE_ONCE(root->has_mapped_host_mmio, true);
+	else
+		WRITE_ONCE(vcpu->kvm->arch.has_mapped_host_mmio, true);
+
+	/*
+	 * Force vCPUs to exit and flush CPU buffers if the vCPU is using the
+	 * affected root(s).
+	 */
+	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
+}
+
 /*
  * Returns true if the SPTE needs to be updated atomically due to having bits
  * that may be changed without holding mmu_lock, and for which KVM must not
@@ -276,6 +292,11 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
 	}
 
+	if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+	    !kvm_vcpu_can_access_host_mmio(vcpu) &&
+	    kvm_is_mmio_pfn(pfn, &is_host_mmio))
+		kvm_track_host_mmio_mapping(vcpu);
+
 	*new_spte = spte;
 	return wrprot;
 }
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1e94f081bdaf4..3133f066927e1 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -280,6 +280,16 @@ static inline bool is_mirror_sptep(tdp_ptep_t sptep)
 	return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
 }
 
+static inline bool kvm_vcpu_can_access_host_mmio(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+	if (root)
+		return READ_ONCE(root->has_mapped_host_mmio);
+
+	return READ_ONCE(vcpu->kvm->arch.has_mapped_host_mmio);
+}
+
 static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
 {
 	return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
index 6a9bfdfbb6e59..2f20fb170def8 100644
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -2,10 +2,12 @@
 #ifndef __KVM_X86_VMX_RUN_FLAGS_H
 #define __KVM_X86_VMX_RUN_FLAGS_H
 
-#define VMX_RUN_VMRESUME_SHIFT		0
-#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT	1
+#define VMX_RUN_VMRESUME_SHIFT				0
+#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT			1
+#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT	2
 
-#define VMX_RUN_VMRESUME		BIT(VMX_RUN_VMRESUME_SHIFT)
-#define VMX_RUN_SAVE_SPEC_CTRL		BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+#define VMX_RUN_VMRESUME			BIT(VMX_RUN_VMRESUME_SHIFT)
+#define VMX_RUN_SAVE_SPEC_CTRL			BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO	BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
 
 #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4953846cb30d1..3025b11007fd1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -75,6 +75,8 @@
 #include "vmx_onhyperv.h"
 #include "posted_intr.h"
 
+#include "mmu/spte.h"
+
 MODULE_AUTHOR("Qumranet");
 MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
 MODULE_LICENSE("GPL");
@@ -963,6 +965,10 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
 	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
 		flags |= VMX_RUN_SAVE_SPEC_CTRL;
 
+	if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+	    kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
+		flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
+
 	return flags;
 }
 
@@ -7290,7 +7296,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
 	else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
-		 kvm_arch_has_assigned_device(vcpu->kvm))
+		 (flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
 		mds_clear_cpu_buffers();
 
 	vmx_disable_fb_clear(vmx);
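
For readers following the flow outside the kernel tree, below is a minimal
standalone sketch (not part of the applied patch) of the logic this change
adds: a sticky per-root flag with a per-VM fallback for "special" roots,
folded into the run-flags word before each VM entry.  The mock_* types,
helper names, and the cpu_buf_vm_clear boolean parameter are simplified
stand-ins invented for illustration; only the decision logic mirrors the
diff above.

/*
 * Standalone model of the sticky host-MMIO tracking.  Types and helpers
 * are simplified stand-ins for the kernel's structures and static branch.
 */
#include <stdbool.h>
#include <stdio.h>

#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO	(1u << 2)

struct mock_root {			/* stand-in for struct kvm_mmu_page */
	bool has_mapped_host_mmio;
};

struct mock_vm {			/* stand-in for struct kvm_arch */
	bool has_mapped_host_mmio;
};

struct mock_vcpu {
	struct mock_root *root;		/* NULL models a "special" root */
	struct mock_vm *vm;
};

/* Sticky: set when the first host MMIO mapping is created, never cleared. */
static void track_host_mmio_mapping(struct mock_vcpu *vcpu)
{
	if (vcpu->root)
		vcpu->root->has_mapped_host_mmio = true;
	else
		vcpu->vm->has_mapped_host_mmio = true;
}

static bool vcpu_can_access_host_mmio(const struct mock_vcpu *vcpu)
{
	if (vcpu->root)
		return vcpu->root->has_mapped_host_mmio;
	return vcpu->vm->has_mapped_host_mmio;
}

/*
 * Computed outside the entry path; the entry code then only tests a bit
 * instead of calling the helper above.
 */
static unsigned int vcpu_run_flags(const struct mock_vcpu *vcpu,
				   bool cpu_buf_vm_clear)
{
	unsigned int flags = 0;

	if (cpu_buf_vm_clear && vcpu_can_access_host_mmio(vcpu))
		flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
	return flags;
}

int main(void)
{
	struct mock_vm vm = { .has_mapped_host_mmio = false };
	struct mock_root root = { .has_mapped_host_mmio = false };
	struct mock_vcpu vcpu = { .root = &root, .vm = &vm };

	printf("before mapping: flags=%#x\n", vcpu_run_flags(&vcpu, true));
	track_host_mmio_mapping(&vcpu);	/* first host MMIO SPTE installed */
	printf("after mapping:  flags=%#x\n", vcpu_run_flags(&vcpu, true));
	return 0;
}

Precomputing the flag this way, as __vmx_vcpu_run_flags() does in the real
patch, is what allows vmx_vcpu_enter_exit() to test a plain bit in noinstr
code without dragging kvm_vcpu_can_access_host_mmio() and its dependencies
into the __always_inline/noinstr section.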