	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
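
+/*
+ * With a mediated PMU, instructions emulated by KVM are never seen by the
+ * hardware PMCs backing the guest's counters.  Bail from fastpath handlers
+ * that emulate (and skip) instructions if any globally enabled PMC is
+ * counting instructions, so the full exit path can do the accounting.
+ */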
+static inline bool kvm_pmu_is_fastpath_emulation_allowed(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
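+	/*
+	 * Each global_ctrl bit enables the PMC with the same index, so the
+	 * intersection with pmc_counting_instructions is non-empty iff an
+	 * enabled PMC is counting instructions.
+	 */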
+	return !kvm_vcpu_has_mediated_pmu(vcpu) ||
+	       !bitmap_intersects(pmu->pmc_counting_instructions,
+				  (unsigned long *)&pmu->global_ctrl,
+				  X86_PMC_IDX_MAX);
+}
+
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
fastpath_t handle_fastpath_invd(struct kvm_vcpu *vcpu)
{
+	if (!kvm_pmu_is_fastpath_emulation_allowed(vcpu))
+		return EXIT_FASTPATH_NONE;
+
	if (!kvm_emulate_invd(vcpu))
		return EXIT_FASTPATH_EXIT_USERSPACE;
static fastpath_t __handle_fastpath_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
+	if (!kvm_pmu_is_fastpath_emulation_allowed(vcpu))
+		return EXIT_FASTPATH_NONE;
+
	switch (msr) {
	case APIC_BASE_MSR + (APIC_ICR >> 4):
		if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic) ||
fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
{
+	if (!kvm_pmu_is_fastpath_emulation_allowed(vcpu))
+		return EXIT_FASTPATH_NONE;
+
	if (!kvm_emulate_halt(vcpu))
		return EXIT_FASTPATH_EXIT_USERSPACE;