KVM: arm64: Use KVM_REQ_RELOAD_PMU to handle PMCR_EL0.E change
author    Oliver Upton <oliver.upton@linux.dev>
          Tue, 17 Dec 2024 17:55:32 +0000 (09:55 -0800)
committer Oliver Upton <oliver.upton@linux.dev>
          Wed, 18 Dec 2024 21:22:25 +0000 (13:22 -0800)
Nested virt introduces yet another set of 'global' knobs for controlling
event counters that are reserved for EL2 (i.e. >= HPMN). Get ready to
share some plumbing with the NV controls by offloading counter
reprogramming to KVM_REQ_RELOAD_PMU.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241217175532.3658134-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
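
For context, KVM_REQ_RELOAD_PMU follows KVM's usual deferred-work pattern:
the sysreg trap handler raises a request bit on the vCPU, and the run loop
consumes it before the next guest entry. A minimal sketch of the two halves
(the consumer side matches check_vcpu_requests() in arch/arm64/kvm/arm.c;
both are kernel-context fragments, not a standalone program):

	/* Producer: kvm_pmu_handle_pmcr() flags the vCPU instead of
	 * reprogramming counters inline. */
	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	/* Consumer: before reentering the guest, the run loop picks the
	 * request up exactly once and reloads PMU state. */
	if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
		kvm_vcpu_reload_pmu(vcpu);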
arch/arm64/kvm/pmu-emul.c

index 6b3ec956a6e2b650f71149d10f649f4a242a66ba..c6423782a8aa827b4c6ce60e7602cc422e1bd6b0 100644
@@ -606,17 +606,13 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
        if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
                val &= ~ARMV8_PMU_PMCR_LP;
 
+       /* Request a reload of the PMU to enable/disable affected counters */
+       if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+               kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
        /* The reset bits don't indicate any state, and shouldn't be saved. */
        __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
-       if (val & ARMV8_PMU_PMCR_E) {
-               kvm_pmu_reprogram_counter_mask(vcpu,
-                      __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-       } else {
-               kvm_pmu_reprogram_counter_mask(vcpu,
-                      __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-       }
-
        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
@@ -626,7 +622,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
                for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
        }
-       kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
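
The first hunk keys the reload off an XOR edge-detect: (old ^ new) has a bit
set exactly where the two values differ, so masking with ARMV8_PMU_PMCR_E
(bit 0 of PMCR_EL0) is nonzero iff the enable bit actually toggled, and a
write that leaves E unchanged requests no reload at all. A standalone
illustration in userspace C, with the bit position taken from the
architecture rather than included from kernel headers:

#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_PMCR_E	(1U << 0)	/* PMCR_EL0.E: global enable */

/* Nonzero iff PMCR_EL0.E differs between the stored and the written
 * value -- the same test the patch uses to gate the reload request. */
static int pmcr_e_changed(uint64_t old, uint64_t new)
{
	return !!((old ^ new) & ARMV8_PMU_PMCR_E);
}

int main(void)
{
	printf("%d\n", pmcr_e_changed(0x1, 0x0));	/* 1: E cleared  */
	printf("%d\n", pmcr_e_changed(0x0, 0x1));	/* 1: E set      */
	printf("%d\n", pmcr_e_changed(0x41, 0x40));	/* 1: E toggled  */
	printf("%d\n", pmcr_e_changed(0x40, 0x40));	/* 0: unchanged  */
	return 0;
}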
@@ -890,11 +885,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
        u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-       kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+       kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
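
Reassembling the last hunk, kvm_vcpu_reload_pmu() ends up as below (put back
together from the diff above, so it mirrors the hunk rather than any later
state of the tree). The ordering matters: the three mask registers are
sanitized against the implemented-counter mask first, so the reprogramming
pass never touches counters the vCPU does not have, whereas the removed code
reprogrammed via kvm_pmu_handle_pmcr() before trimming the masks.

void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

	/* Drop stale bits for counters the vCPU does not implement... */
	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;

	/* ...then reprogram every implemented counter from current state. */
	kvm_pmu_reprogram_counter_mask(vcpu, mask);
}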