git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: x86/pmu: Reprogram mediated PMU event selectors on event filter updates
authorDapeng Mi <dapeng1.mi@linux.intel.com>
Sat, 6 Dec 2025 00:17:01 +0000 (16:17 -0800)
committerSean Christopherson <seanjc@google.com>
Thu, 8 Jan 2026 19:52:10 +0000 (11:52 -0800)
Refresh the event selectors that are programmed into hardware when a PMC
is "reprogrammed" for a mediated PMU, i.e. if userspace changes the PMU
event filters.

Note, KVM doesn't utilize the reprogramming infrastructure to handle
counter overflow for mediated PMUs, as there's no need to reprogram a
non-existent perf event.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Co-developed-by: Mingwei Zhang <mizhang@google.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
[sean: add a helper to document behavior, split patch and rewrite changelog]
Tested-by: Xudong Hao <xudong.hao@intel.com>
Tested-by: Manali Shukla <manali.shukla@amd.com>
Link: https://patch.msgid.link/20251206001720.468579-26-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/pmu.c

index a05366e4eef2dab1acb0e6ee165e29271985050c..24f5c14715efb572500a92dddb17d8acbf1166e7 100644 (file)
@@ -520,6 +520,25 @@ static bool pmc_is_event_allowed(struct kvm_pmc *pmc)
        return is_fixed_event_allowed(filter, pmc->idx);
 }
 
+static void kvm_mediated_pmu_refresh_event_filter(struct kvm_pmc *pmc)
+{
+       bool allowed = pmc_is_event_allowed(pmc);
+       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+       if (pmc_is_gp(pmc)) {
+               pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+               if (allowed)
+                       pmc->eventsel_hw |= pmc->eventsel &
+                                           ARCH_PERFMON_EVENTSEL_ENABLE;
+       } else {
+               u64 mask = intel_fixed_bits_by_idx(pmc->idx - KVM_FIXED_PMC_BASE_IDX, 0xf);
+
+               pmu->fixed_ctr_ctrl_hw &= ~mask;
+               if (allowed)
+                       pmu->fixed_ctr_ctrl_hw |= pmu->fixed_ctr_ctrl & mask;
+       }
+}
+
 static int reprogram_counter(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -528,6 +547,11 @@ static int reprogram_counter(struct kvm_pmc *pmc)
        bool emulate_overflow;
        u8 fixed_ctr_ctrl;
 
+       if (kvm_vcpu_has_mediated_pmu(pmu_to_vcpu(pmu))) {
+               kvm_mediated_pmu_refresh_event_filter(pmc);
+               return 0;
+       }
+
        emulate_overflow = pmc_pause_counter(pmc);
 
        if (!pmc_is_globally_enabled(pmc) || !pmc_is_locally_enabled(pmc) ||