KVM: arm64: Add unified helper for reprogramming counters by mask
author     Oliver Upton <oliver.upton@linux.dev>
           Tue, 17 Dec 2024 17:55:13 +0000 (09:55 -0800)
committer  Oliver Upton <oliver.upton@linux.dev>
           Wed, 18 Dec 2024 21:22:25 +0000 (13:22 -0800)
Having separate helpers for enabling/disabling counters provides the
wrong abstraction, as the state of each counter needs to be evaluated
independently and, in some cases, against a different global enable bit.

Collapse the enable/disable accessors into a single, common helper that
reconfigures every counter set in @mask, leaving the complexity of
determining if an event is actually enabled in
kvm_pmu_counter_is_enabled().

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241217175513.3658056-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
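
The helper named above, kvm_pmu_counter_is_enabled(), is not modified by this
commit, so its body does not appear in the diff below. As a rough,
non-authoritative sketch of the check it centralises (the
kvm_pmu_counter_is_hyp() helper and the MDCR_EL2_HPME bit used here are
assumptions drawn from the "different global enable bit" remark, not part of
this commit):

/*
 * Sketch only -- the in-tree implementation may differ. A counter is
 * treated as enabled when its bit is set in PMCNTENSET_EL0 *and* the
 * global enable bit that applies to it is set: PMCR_EL0.E for ordinary
 * counters, or (assumed here) MDCR_EL2.HPME for counters reserved to EL2.
 */
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)))
		return false;

	if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))	/* assumed helper */
		return __vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME;

	return kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E;
}

With the decision concentrated in one place, kvm_pmu_reprogram_counter_mask()
in the diff below only has to create/enable or disable the backing perf event
for each counter set in the mask.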
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
include/kvm/arm_pmu.h

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 456102bc0b555b41a01da5a41d25b57099026bd9..6b3ec956a6e2b650f71149d10f649f4a242a66ba 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
 
 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
 
 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
 {
@@ -327,48 +328,25 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
 }
 
-/**
- * kvm_pmu_enable_counter_mask - enable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENSET register
- *
- * Call perf_event_enable to start counting the perf event
- */
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
 {
-       int i;
-       if (!kvm_vcpu_has_pmu(vcpu))
-               return;
-
-       if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
+       if (!pmc->perf_event) {
+               kvm_pmu_create_perf_event(pmc);
                return;
+       }
 
-       for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-               struct kvm_pmc *pmc;
-
-               if (!(val & BIT(i)))
-                       continue;
-
-               pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+       perf_event_enable(pmc->perf_event);
+       if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+               kvm_debug("fail to enable perf event\n");
+}
 
-               if (!pmc->perf_event) {
-                       kvm_pmu_create_perf_event(pmc);
-               } else {
-                       perf_event_enable(pmc->perf_event);
-                       if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
-                               kvm_debug("fail to enable perf event\n");
-               }
-       }
+static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event)
+               perf_event_disable(pmc->perf_event);
 }
 
-/**
- * kvm_pmu_disable_counter_mask - disable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENCLR register
- *
- * Call perf_event_disable to stop counting the perf event
- */
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 {
        int i;
 
@@ -376,16 +354,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
                return;
 
        for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-               struct kvm_pmc *pmc;
+               struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
 
                if (!(val & BIT(i)))
                        continue;
 
-               pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-               if (pmc->perf_event)
-                       perf_event_disable(pmc->perf_event);
+               if (kvm_pmu_counter_is_enabled(pmc))
+                       kvm_pmc_enable_perf_event(pmc);
+               else
+                       kvm_pmc_disable_perf_event(pmc);
        }
+
+       kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 /*
@@ -630,10 +610,10 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
        __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
        if (val & ARMV8_PMU_PMCR_E) {
-               kvm_pmu_enable_counter_mask(vcpu,
+               kvm_pmu_reprogram_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        } else {
-               kvm_pmu_disable_counter_mask(vcpu,
+               kvm_pmu_reprogram_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        }
 
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e2a5c2918d9e5af9ee8526dce221d6e80c292d03..6ef8641d9833cc65d64b74287b8eb5ba41b845e9 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1208,16 +1208,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        mask = kvm_pmu_accessible_counter_mask(vcpu);
        if (p->is_write) {
                val = p->regval & mask;
-               if (r->Op2 & 0x1) {
+               if (r->Op2 & 0x1)
                        /* accessing PMCNTENSET_EL0 */
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
-                       kvm_pmu_enable_counter_mask(vcpu, val);
-                       kvm_vcpu_pmu_restore_guest(vcpu);
-               } else {
+               else
                        /* accessing PMCNTENCLR_EL0 */
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
-                       kvm_pmu_disable_counter_mask(vcpu, val);
-               }
+
+               kvm_pmu_reprogram_counter_mask(vcpu, val);
        } else {
                p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
        }
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index e61dd7dd22869dd01d54428f612ac682a9c4dd74..147bd3ee4f7bae1089a4df4e201a1bc4cbad3b57 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -53,8 +53,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -127,8 +126,7 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)