	return is_fixed_event_allowed(filter, pmc->idx);
}
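
+/*
+ * Sync the hardware-visible enable bit (eventsel_hw / fixed_ctr_ctrl_hw)
+ * with the PMU event filter: clear the bit, and restore the guest's value
+ * only if the filter allows the event.
+ */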
+static void kvm_mediated_pmu_refresh_event_filter(struct kvm_pmc *pmc)
+{
+	bool allowed = pmc_is_event_allowed(pmc);
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+	if (pmc_is_gp(pmc)) {
+		pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+		if (allowed)
+			pmc->eventsel_hw |= pmc->eventsel &
+					    ARCH_PERFMON_EVENTSEL_ENABLE;
+	} else {
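+		/*
+		 * Each fixed counter is controlled by a 4-bit field in
+		 * FIXED_CTR_CTRL; clear this counter's field and restore the
+		 * guest's value only if the event is allowed.
+		 */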
+		u64 mask = intel_fixed_bits_by_idx(pmc->idx - KVM_FIXED_PMC_BASE_IDX, 0xf);
+
+		pmu->fixed_ctr_ctrl_hw &= ~mask;
+		if (allowed)
+			pmu->fixed_ctr_ctrl_hw |= pmu->fixed_ctr_ctrl & mask;
+	}
+}
+
static int reprogram_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	bool emulate_overflow;
	u8 fixed_ctr_ctrl;
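
+	/*
+	 * For a mediated PMU, skip the emulated reprogramming below and
+	 * simply refresh the event filter state in the hardware-visible
+	 * controls.
+	 */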
+	if (kvm_vcpu_has_mediated_pmu(pmu_to_vcpu(pmu))) {
+		kvm_mediated_pmu_refresh_event_filter(pmc);
+		return 0;
+	}
+
	emulate_overflow = pmc_pause_counter(pmc);

	if (!pmc_is_globally_enabled(pmc) || !pmc_is_locally_enabled(pmc) ||