KVM: arm64: nv: Reprogram PMU events affected by nested transition
author Oliver Upton <oliver.upton@linux.dev>
Fri, 25 Oct 2024 18:25:59 +0000 (18:25 +0000)
committer Oliver Upton <oliver.upton@linux.dev>
Thu, 31 Oct 2024 19:00:41 +0000 (19:00 +0000)
Start reprogramming PMU events at nested boundaries now that everything
is in place to handle the EL2 event filter. Only repaint events where
the filter differs between EL1 and EL2 as a slight optimization.

The PMU now 'works' for nested VMs, albeit slowly.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241025182559.3364829-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
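
Editor's note: the reprogramming decision described above hinges on the PMEVTYPER_EL0 filter bits. The standalone sketch below models only the shape of that check; the helper names are hypothetical simplifications (not the kernel's kvm_pmc_counts_at_el1()/kvm_pmc_counts_at_el2(), which appear in the pmu-emul.c hunk further down), and the NSK/NSU secure/non-secure filter bits are deliberately ignored. Bit positions follow the architectural PMEVTYPER<n>_EL0 layout (P is bit 31, NSH is bit 27).

/*
 * Illustrative sketch only, not kernel code: a counter's backing perf
 * event needs rebuilding across a vEL1<->vEL2 transition only when its
 * PMEVTYPER_EL0 filtering differs between EL1 and EL2.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMEVTYPER_P    (UINT64_C(1) << 31)  /* P: exclude EL1 */
#define PMEVTYPER_NSH  (UINT64_C(1) << 27)  /* NSH: include EL2 */

/* True if the event is configured to count while the guest is at EL1. */
static bool counts_at_el1(uint64_t evtyper)
{
        return !(evtyper & PMEVTYPER_P);
}

/* True if the event is configured to count while the guest is at EL2. */
static bool counts_at_el2(uint64_t evtyper)
{
        return evtyper & PMEVTYPER_NSH;
}

/* Reprogram only when the EL1 and EL2 filter choices disagree. */
static bool needs_reprogram(uint64_t evtyper)
{
        return counts_at_el1(evtyper) != counts_at_el2(evtyper);
}

int main(void)
{
        /* EL1-only event: the filter flips across the transition, so reprogram. */
        printf("EL1 only    : %d\n", needs_reprogram(0));
        /* Event counting at both EL1 and EL2: nothing to do. */
        printf("EL1 and EL2 : %d\n", needs_reprogram(PMEVTYPER_NSH));
        return 0;
}

The kernel helpers referenced in the hunks below remain the authoritative version; the sketch only captures why counters whose filters match at both levels can be skipped.
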
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/pmu-emul.c
include/kvm/arm_pmu.h

diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 897ea81ed2b2990f8051f36483e622d1b16b441b..71c97c214c1c5c63a57b8b4e9c8b9abd954446be 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2450,6 +2450,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
 
        kvm_arch_vcpu_load(vcpu, smp_processor_id());
        preempt_enable();
+
+       kvm_pmu_nested_transition(vcpu);
 }
 
 static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2532,6 +2534,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
        kvm_arch_vcpu_load(vcpu, smp_processor_id());
        preempt_enable();
 
+       kvm_pmu_nested_transition(vcpu);
+
        return 1;
 }
 
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index e2eb2ba903b67f6a28457d2f02f93db3fa04faaa..8ad62284fa230c6fbccd336737c493cfe6cd590e 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -1215,3 +1215,32 @@ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
 
        return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
 }
+
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
+{
+       bool reprogrammed = false;
+       unsigned long mask;
+       int i;
+
+       if (!kvm_vcpu_has_pmu(vcpu))
+               return;
+
+       mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+       for_each_set_bit(i, &mask, 32) {
+               struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+
+               /*
+                * We only need to reconfigure events where the filter is
+                * different at EL1 vs. EL2, as we're multiplexing the true EL1
+                * event filter bit for nested.
+                */
+               if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
+                       continue;
+
+               kvm_pmu_create_perf_event(pmc);
+               reprogrammed = true;
+       }
+
+       if (reprogrammed)
+               kvm_vcpu_pmu_restore_guest(vcpu);
+}
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 76244f0bd47a860bca0d259be0bbc8f15eea1a0b..e61dd7dd22869dd01d54428f612ac682a9c4dd74 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -98,6 +98,7 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
 
 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
 bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
 #else
 struct kvm_pmu {
 };
@@ -198,6 +199,8 @@ static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
        return false;
 }
 
+static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
+
 #endif
 
 #endif