KVM: VMX: Apply MMIO Stale Data mitigation if KVM maps MMIO into the guest
author		Sean Christopherson <seanjc@google.com>
		Fri, 23 May 2025 01:17:54 +0000 (18:17 -0700)
committer	Sean Christopherson <seanjc@google.com>
		Wed, 25 Jun 2025 15:42:51 +0000 (08:42 -0700)
Enforce the MMIO Stale Data mitigation if KVM has ever mapped host MMIO
into the VM, not if the VM has an assigned device.  VFIO is but one of
many ways to map host MMIO into a KVM guest, and even within VFIO,
formally attaching a device to a VM via KVM_DEV_VFIO_FILE_ADD is entirely
optional.
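
For illustration, a minimal userspace sketch of one such path: plain VFIO
mmap() plus a KVM memslot, with no KVM_DEV_VFIO_FILE_ADD involved.  This is
not part of this commit; error handling and alignment checks are omitted, the
helper name is made up, and vm_fd, device_fd, slot, and guest_phys_addr are
assumed to be set up elsewhere.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>
	#include <linux/vfio.h>

	static void map_bar0_into_guest(int vm_fd, int device_fd, uint32_t slot,
					uint64_t guest_phys_addr)
	{
		struct vfio_region_info info = {
			.argsz = sizeof(info),
			.index = VFIO_PCI_BAR0_REGION_INDEX,
		};
		struct kvm_userspace_memory_region region;
		void *mmio;

		/* Ask VFIO for BAR0's size and mmap offset. */
		ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);

		/* Map the host MMIO into this process (BAR must be mmap-able). */
		mmio = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    device_fd, info.offset);

		/* Hand the mapping to KVM as guest memory, i.e. map host MMIO. */
		memset(&region, 0, sizeof(region));
		region.slot = slot;
		region.guest_phys_addr = guest_phys_addr;
		region.memory_size = info.size;
		region.userspace_addr = (uintptr_t)mmio;
		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}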

Track whether or not the guest can access host MMIO on a per-MMU basis,
i.e. based on whether or not the vCPU has a mapping to host MMIO.  For
simplicity, track MMIO mappings in "special" roots (those without a
kvm_mmu_page) at the VM level, as only Intel CPUs are vulnerable, and so
only legacy 32-bit shadow paging is affected, i.e. lack of precise
tracking is a complete non-issue.

Make the per-MMU and per-VM flags sticky.  Detecting when *all* MMIO
mappings have been removed would be absurdly complex.  And in practice,
removing MMIO from a guest will be done by deleting the associated memslot,
which by default will force KVM to re-allocate all roots.  Special roots
will forever be mitigated, but as above, the affected scenarios are not
expected to be performance sensitive.
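
For illustration, deleting a memslot from userspace amounts to re-issuing
KVM_SET_USER_MEMORY_REGION with a zero size; the snippet below is a hedged
sketch, not part of this commit, and vm_fd and the slot number are
placeholders.

	/* Delete the memslot backing the MMIO, e.g. slot 5. */
	struct kvm_userspace_memory_region region = {
		.slot = 5,
		.memory_size = 0,	/* size 0 deletes the slot */
	};

	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);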

Use a VMX_RUN flag to communicate the need for a CPU buffer flush to
vmx_vcpu_enter_exit() so that kvm_vcpu_can_access_host_mmio() and all its
dependencies don't need to be marked __always_inline, e.g. so that KASAN
doesn't trigger a noinstr violation.
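
For reference, a rough sketch of how the flag is consumed (simplified and
approximate, not part of this diff): vmx_vcpu_run() computes the run flags in
instrumentable code, so __vmx_vcpu_run_flags() is free to call
kvm_vcpu_can_access_host_mmio() and its dependencies, and the noinstr
vmx_vcpu_enter_exit() only tests the precomputed bit.

	/* Approximate call order inside vmx_vcpu_run(); details elided. */
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Instrumentable context: normal helpers are fine here. */
	unsigned int run_flags = __vmx_vcpu_run_flags(vmx);

	/* noinstr region: only the precomputed bit is examined. */
	vmx_vcpu_enter_exit(vcpu, run_flags);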

Cc: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Fixes: 8cb861e9e3c9 ("x86/speculation/mmio: Add mitigation for Processor MMIO Stale Data")
Tested-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Link: https://lore.kernel.org/r/20250523011756.3243624-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/spte.c
arch/x86/kvm/mmu/spte.h
arch/x86/kvm/vmx/run_flags.h
arch/x86/kvm/vmx/vmx.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4a391929cdbaa9960e870ade649606bdcab2e9a..6a172c7630f3f69aa1a2fdadb9a0894805cd2074 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1451,6 +1451,7 @@ struct kvm_arch {
        bool x2apic_format;
        bool x2apic_broadcast_quirk_disabled;
 
+       bool has_mapped_host_mmio;
        bool guest_can_read_msr_platform_info;
        bool exception_payload_enabled;
 
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index db8f33e4de624601b1bc2b28c1e9357da1c15022..65f3c89d7c5d24979bb9e0df11c3f96017f9dc47 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -103,6 +103,9 @@ struct kvm_mmu_page {
                int root_count;
                refcount_t tdp_mmu_root_count;
        };
+
+       bool has_mapped_host_mmio;
+
        union {
                /* These two members aren't used for TDP MMU */
                struct {
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 3f16c91aa04238e108e58e570c3b8cba530a9ee3..df31039b5d63b32404d7542383ca64fac5d68fe4 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -138,6 +138,22 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
        return *is_host_mmio;
 }
 
+static void kvm_track_host_mmio_mapping(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+       if (root)
+               WRITE_ONCE(root->has_mapped_host_mmio, true);
+       else
+               WRITE_ONCE(vcpu->kvm->arch.has_mapped_host_mmio, true);
+
+       /*
+        * Force vCPUs to exit and flush CPU buffers if the vCPU is using the
+        * affected root(s).
+        */
+       kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
+}
+
 /*
  * Returns true if the SPTE needs to be updated atomically due to having bits
  * that may be changed without holding mmu_lock, and for which KVM must not
@@ -276,6 +292,11 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
        }
 
+       if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+           !kvm_vcpu_can_access_host_mmio(vcpu) &&
+           kvm_is_mmio_pfn(pfn, &is_host_mmio))
+               kvm_track_host_mmio_mapping(vcpu);
+
        *new_spte = spte;
        return wrprot;
 }
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1e94f081bdaf40a334ab9a1dcdd1d7897ae83a5e..3133f066927e14999dd2b726c4ee086031745af8 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -280,6 +280,16 @@ static inline bool is_mirror_sptep(tdp_ptep_t sptep)
        return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
 }
 
+static inline bool kvm_vcpu_can_access_host_mmio(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+       if (root)
+               return READ_ONCE(root->has_mapped_host_mmio);
+
+       return READ_ONCE(vcpu->kvm->arch.has_mapped_host_mmio);
+}
+
 static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
 {
        return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
index 6a9bfdfbb6e59b2e613385cd2ad46cc651a0eb28..2f20fb170def8b10c8c0c46f7ba751f845c19e2c 100644
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -2,10 +2,12 @@
 #ifndef __KVM_X86_VMX_RUN_FLAGS_H
 #define __KVM_X86_VMX_RUN_FLAGS_H
 
-#define VMX_RUN_VMRESUME_SHIFT         0
-#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT   1
+#define VMX_RUN_VMRESUME_SHIFT                         0
+#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT                   1
+#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT       2
 
-#define VMX_RUN_VMRESUME               BIT(VMX_RUN_VMRESUME_SHIFT)
-#define VMX_RUN_SAVE_SPEC_CTRL         BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+#define VMX_RUN_VMRESUME                       BIT(VMX_RUN_VMRESUME_SHIFT)
+#define VMX_RUN_SAVE_SPEC_CTRL                 BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO     BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
 
 #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4953846cb30d1758f2eda294796c5a50ddb9408c..3025b11007fd1aaa7f14e59896293cfdba387a62 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -75,6 +75,8 @@
 #include "vmx_onhyperv.h"
 #include "posted_intr.h"
 
+#include "mmu/spte.h"
+
 MODULE_AUTHOR("Qumranet");
 MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
 MODULE_LICENSE("GPL");
@@ -963,6 +965,10 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
        if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
                flags |= VMX_RUN_SAVE_SPEC_CTRL;
 
+       if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+           kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
+               flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
+
        return flags;
 }
 
@@ -7290,7 +7296,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
        if (static_branch_unlikely(&vmx_l1d_should_flush))
                vmx_l1d_flush(vcpu);
        else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
-                kvm_arch_has_assigned_device(vcpu->kvm))
+                (flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
                mds_clear_cpu_buffers();
 
        vmx_disable_fb_clear(vmx);