Disable RDPMC interception for vCPUs with a mediated vPMU that is
compatible with the host PMU, i.e. that doesn't require KVM emulation of
RDPMC to honor the guest's vCPU model. With a mediated vPMU, all guest
state accessible via RDPMC is loaded into hardware while the guest is
running.

Adjust RDPMC interception only for non-TDX guests, as the TDX module is
responsible for managing RDPMC intercepts based on the TD configuration.
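
For reference, a guest typically reads a counter with a sequence along
these lines (illustrative snippet only, not part of this patch; the
helper name is made up).  With interception disabled, the RDPMC below
executes directly against the counters loaded into hardware instead of
exiting to KVM:

  static inline unsigned long long guest_rdpmc(unsigned int counter)
  {
          unsigned int lo, hi;

          /* ECX selects the counter; the result is returned in EDX:EAX. */
          asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
          return ((unsigned long long)hi << 32) | lo;
  }
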
Co-developed-by: Mingwei Zhang <mizhang@google.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Co-developed-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Tested-by: Manali Shukla <manali.shukla@amd.com>
Link: https://patch.msgid.link/20251206001720.468579-21-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
return 0;
}
+bool kvm_need_rdpmc_intercept(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
+ if (!kvm_vcpu_has_mediated_pmu(vcpu))
+ return true;
+
+ /*
+ * VMware allows access to these Pseudo-PMCs via RDPMC from Ring3 even
+ * when CR4.PCE=0.
+ */
+ if (enable_vmware_backdoor)
+ return true;
+
+ /*
+ * Note! Check *host* PMU capabilities, not KVM's PMU capabilities, as
+ * KVM's capabilities are constrained by what KVM itself supports, i.e.
+ * they may be a subset of the hardware capabilities.
+ */
+ return pmu->nr_arch_gp_counters != kvm_host_pmu.num_counters_gp ||
+ pmu->nr_arch_fixed_counters != kvm_host_pmu.num_counters_fixed ||
+ pmu->counter_bitmask[KVM_PMC_GP] != (BIT_ULL(kvm_host_pmu.bit_width_gp) - 1) ||
+ pmu->counter_bitmask[KVM_PMC_FIXED] != (BIT_ULL(kvm_host_pmu.bit_width_fixed) - 1);
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_need_rdpmc_intercept);
+
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
if (lapic_in_kernel(vcpu)) {
void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu);
bool is_vmware_backdoor_pmc(u32 pmc_idx);
+bool kvm_need_rdpmc_intercept(struct kvm_vcpu *vcpu);
extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
}
}
+
+ if (kvm_need_rdpmc_intercept(vcpu))
+ svm_set_intercept(svm, INTERCEPT_RDPMC);
+ else
+ svm_clr_intercept(svm, INTERCEPT_RDPMC);
}
static void svm_recalc_intercepts(struct kvm_vcpu *vcpu)
*/
}
+static void vmx_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
+{
+ exec_controls_changebit(to_vmx(vcpu), CPU_BASED_RDPMC_EXITING,
+ kvm_need_rdpmc_intercept(vcpu));
+}
+
void vmx_recalc_intercepts(struct kvm_vcpu *vcpu)
{
+ vmx_recalc_instruction_intercepts(vcpu);
vmx_recalc_msr_intercepts(vcpu);
}
vcpu->arch.perf_capabilities = data;
kvm_pmu_refresh(vcpu);
+ kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
break;
case MSR_IA32_PRED_CMD: {
u64 reserved_bits = ~(PRED_CMD_IBPB | PRED_CMD_SBPB);