git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: arm64: Make PMEVTYPER<n>_EL0.NSH RES0 if EL2 isn't advertised
author: Oliver Upton <oliver.upton@linux.dev>
Thu, 19 Oct 2023 18:56:17 +0000 (18:56 +0000)
committer: Oliver Upton <oliver.upton@linux.dev>
Tue, 24 Oct 2023 19:26:14 +0000 (19:26 +0000)
The NSH bit, which filters event counting at EL2, is required by the
architecture if an implementation has EL2. Even though KVM doesn't
support nested virt yet, it makes no effort to hide the existence of EL2
from the ID registers. Userspace can, however, change the value of PFR0
to hide EL2. Align KVM's sysreg emulation with the architecture and make
NSH RES0 if EL2 isn't advertised. Keep in mind the bit is ignored when
constructing the backing perf event.

While at it, build the event type mask using explicit field definitions
instead of relying on ARMV8_PMU_EVTYPE_MASK. KVM probably should've been
doing this in the first place, as it avoids changes to the
aforementioned mask affecting sysreg emulation.

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Link: https://lore.kernel.org/r/20231019185618.3442949-2-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
include/kvm/arm_pmu.h

index 6b066e04dc5dfe506572338cc3f1008c5f3263d6..32d83db9674ecfce7189807d796c8830e37423f8 100644 (file)
@@ -60,6 +60,18 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
        return __kvm_pmu_event_mask(pmuver);
 }
 
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+       u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
+                  kvm_pmu_event_mask(kvm);
+       u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
+
+       if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
+               mask |= ARMV8_PMU_INCLUDE_EL2;
+
+       return mask;
+}
+
 /**
  * kvm_pmc_is_64bit - determine if counter is 64bit
  * @pmc: counter context
@@ -657,18 +669,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
 {
        struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
-       u64 reg, mask;
+       u64 reg;
 
        if (!kvm_vcpu_has_pmu(vcpu))
                return;
 
-       mask  =  ARMV8_PMU_EVTYPE_MASK;
-       mask &= ~ARMV8_PMU_EVTYPE_EVENT;
-       mask |= kvm_pmu_event_mask(vcpu->kvm);
-
        reg = counter_index_to_evtreg(pmc->idx);
-
-       __vcpu_sys_reg(vcpu, reg) = data & mask;
+       __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
 
        kvm_pmu_create_perf_event(pmc);
 }
index e92ec810d4494bac8ec83c7f23d0a360d9651493..78720c373904f177a83a1d31e9727c062424a31a 100644 (file)
@@ -746,8 +746,12 @@ static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
+       /* This thing will UNDEF, who cares about the reset value? */
+       if (!kvm_vcpu_has_pmu(vcpu))
+               return 0;
+
        reset_unknown(vcpu, r);
-       __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
+       __vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
 
        return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -988,7 +992,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
                kvm_vcpu_pmu_restore_guest(vcpu);
        } else {
-               p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+               p->regval = __vcpu_sys_reg(vcpu, reg);
        }
 
        return true;
index 31029f4f7be851d684c9500c243b59e979e6df67..fd0aa8105a5bf49c7f63e7d0f79f7918cdaca061 100644 (file)
@@ -101,6 +101,7 @@ void kvm_vcpu_pmu_resync_el0(void);
 })
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
 
 #else
 struct kvm_pmu {
@@ -172,6 +173,10 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
 {
        return 0;
 }
+static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+       return 0;
+}
 static inline void kvm_vcpu_pmu_resync_el0(void) {}
 
 #endif