+#define nested_vmx_merge_msr_bitmaps(msr, type)	\
+	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, msr, type)
+
+#define nested_vmx_merge_msr_bitmaps_read(msr)	\
+	nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_R)
+
+#define nested_vmx_merge_msr_bitmaps_write(msr)	\
+	nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_W)
+
+#define nested_vmx_merge_msr_bitmaps_rw(msr)	\
+	nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_RW)
+
+static void nested_vmx_merge_pmu_msr_bitmaps(struct kvm_vcpu *vcpu,
+					     unsigned long *msr_bitmap_l1,
+					     unsigned long *msr_bitmap_l0)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int i;
+
+	/*
+	 * Skip the merges if the vCPU doesn't have a mediated PMU, i.e. if
+	 * none of the PMU MSRs can possibly be passed through to L1.
+	 */
+	if (!kvm_vcpu_has_mediated_pmu(vcpu))
+		return;
+
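+	/*
+	 * Pass through the general purpose counters, both the legacy
+	 * IA32_PERFCTRx MSRs and their full-width IA32_PMCx aliases.
+	 */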
+	for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
+		nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_PERFCTR0 + i);
+		nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_PMC0 + i);
+	}
+
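+	/* Pass through the fixed-function counters (IA32_FIXED_CTRx). */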
+	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+		nested_vmx_merge_msr_bitmaps_rw(MSR_CORE_PERF_FIXED_CTR0 + i);
+
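+	/*
+	 * GLOBAL_STATUS is read-only to the guest (writes #GP), and
+	 * GLOBAL_OVF_CTRL exists only to clear bits in GLOBAL_STATUS, i.e. is
+	 * effectively write-only, so pass through only the legal accesses.
+	 */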
+	nested_vmx_merge_msr_bitmaps_rw(MSR_CORE_PERF_GLOBAL_CTRL);
+	nested_vmx_merge_msr_bitmaps_read(MSR_CORE_PERF_GLOBAL_STATUS);
+	nested_vmx_merge_msr_bitmaps_write(MSR_CORE_PERF_GLOBAL_OVF_CTRL);
+}
+
/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
	nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
					 MSR_IA32_PL3_SSP, MSR_TYPE_RW);
+	nested_vmx_merge_pmu_msr_bitmaps(vcpu, msr_bitmap_l1, msr_bitmap_l0);
+
	kvm_vcpu_unmap(vcpu, &map);
	vmx->nested.force_msr_bitmap_recalc = false;