KVM: nVMX: Disable PMU MSR interception as appropriate while running L2
author     Mingwei Zhang <mizhang@google.com>
           Sat, 6 Dec 2025 00:17:07 +0000 (16:17 -0800)
committer  Sean Christopherson <seanjc@google.com>
           Thu, 8 Jan 2026 19:52:14 +0000 (11:52 -0800)
Merge KVM's PMU MSR interception bitmaps with those of L1, i.e. merge the
bitmaps of vmcs01 and vmcs12, e.g. so that KVM doesn't interpose on MSR
accesses unnecessarily if L1 exposes a mediated PMU (or equivalent) to L2.

Signed-off-by: Mingwei Zhang <mizhang@google.com>
Co-developed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
[sean: rewrite changelog and comment, omit MSRs that are always intercepted]
Tested-by: Xudong Hao <xudong.hao@intel.com>
Tested-by: Manali Shukla <manali.shukla@amd.com>
Link: https://patch.msgid.link/20251206001720.468579-32-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/nested.c

index b56ed2b1ac674c88f1c7e94674b945e2507be1e4..729cc1f05ac80251485688266a0a01396a3b90f5 100644
@@ -630,6 +630,34 @@ static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
 #define nested_vmx_merge_msr_bitmaps_rw(msr) \
        nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_RW)
 
+static void nested_vmx_merge_pmu_msr_bitmaps(struct kvm_vcpu *vcpu,
+                                            unsigned long *msr_bitmap_l1,
+                                            unsigned long *msr_bitmap_l0)
+{
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       int i;
+
+       /*
+        * Skip the merges if the vCPU doesn't have a mediated PMU, i.e. if
+        * none of the MSRs can possibly be passed through to L1.
+        */
+       if (!kvm_vcpu_has_mediated_pmu(vcpu))
+               return;
+
+       for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
+               nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_PERFCTR0 + i);
+               nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_PMC0 + i);
+       }
+
+       for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+               nested_vmx_merge_msr_bitmaps_rw(MSR_CORE_PERF_FIXED_CTR0 + i);
+
+       nested_vmx_merge_msr_bitmaps_rw(MSR_CORE_PERF_GLOBAL_CTRL);
+       nested_vmx_merge_msr_bitmaps_read(MSR_CORE_PERF_GLOBAL_STATUS);
+       nested_vmx_merge_msr_bitmaps_write(MSR_CORE_PERF_GLOBAL_OVF_CTRL);
+}
+
 /*
  * Merge L0's and L1's MSR bitmap, return false to indicate that
  * we do not use the hardware.
@@ -745,6 +773,8 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
        nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
                                         MSR_IA32_PL3_SSP, MSR_TYPE_RW);
 
+       nested_vmx_merge_pmu_msr_bitmaps(vcpu, msr_bitmap_l1, msr_bitmap_l0);
+
        kvm_vcpu_unmap(vcpu, &map);
 
        vmx->nested.force_msr_bitmap_recalc = false;
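
Note the asymmetry at the tail of the new helper: the counters and
GLOBAL_CTRL are merged for both reads and writes (_rw), while
GLOBAL_STATUS is merged only for reads and GLOBAL_OVF_CTRL only for
writes. The bracketed changelog note about omitting always-intercepted
MSRs suggests the missing directions simply stay intercepted
unconditionally. A direction-aware sketch of that behavior (again a
hypothetical model, not KVM code):

  #include <stdbool.h>
  #include <stdio.h>

  /* Interception state for one MSR, tracked per access direction. */
  struct msr_intercepts {
          bool read;
          bool write;
  };

  /* Merge only the read direction; the write bit keeps its default. */
  static void merge_read(struct msr_intercepts *l02,
                         struct msr_intercepts l0, struct msr_intercepts l1)
  {
          l02->read = l0.read || l1.read;
  }

  int main(void)
  {
          /* vmcs02 default: intercept everything until a merge clears it. */
          struct msr_intercepts global_status = { .read = true, .write = true };

          /* Neither L0 nor L1 intercepts reads of GLOBAL_STATUS. */
          merge_read(&global_status,
                     (struct msr_intercepts){ .read = false, .write = true },
                     (struct msr_intercepts){ .read = false, .write = true });

          printf("GLOBAL_STATUS: reads %s, writes %s\n",
                 global_status.read ? "intercepted" : "passed through",
                 global_status.write ? "intercepted" : "passed through");
          return 0;
  }

Starting from "everything intercepted" and only ever clearing bits for
the merged direction is what makes the read-only and write-only merges
safe: the unmerged direction can never be accidentally passed through.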