]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Calculate cptr_el2 traps on activating traps
authorFuad Tabba <tabba@google.com>
Fri, 21 Mar 2025 00:10:10 +0000 (00:10 +0000)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 28 Mar 2025 21:04:58 +0000 (22:04 +0100)
[ Upstream commit 2fd5b4b0e7b440602455b79977bfa64dea101e6c ]

Similar to VHE, calculate the value of cptr_el2 from scratch when
activating traps. This removes the need to store cptr_el2 in every
vcpu structure. Moreover, some trap settings, such as those that
depend on whether the guest owns the fp registers, need to be
recalculated on every vcpu run.

Reported-by: James Clark <james.clark@linaro.org>
Fixes: 5294afdbf45a ("KVM: arm64: Exclude FP ownership from kvm_vcpu_arch")
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/switch.c

index c85aa4f1def810a08e7bfa4ab181760839521c25..6762dadce45deb657b6e8df3e14dc9fbef884f1d 100644 (file)
@@ -708,7 +708,6 @@ struct kvm_vcpu_arch {
        u64 hcr_el2;
        u64 hcrx_el2;
        u64 mdcr_el2;
-       u64 cptr_el2;
 
        /* Exception Information */
        struct kvm_vcpu_fault_info fault;
index 3b3ecfed294f2be5bf545f4e89ba6e53415625f5..591f512ab072963424f4b2287a1a572fc72bd639 100644 (file)
@@ -1569,7 +1569,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
        }
 
        vcpu_reset_hcr(vcpu);
-       vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
 
        /*
         * Handle the "start in power-off" case.
index 071993c16de81ca0b0181c56d0598b1b026ae018..6405fa30f961723c0da0761be079e09f91b7e8e1 100644 (file)
@@ -31,8 +31,6 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
        u64 hcr_set = HCR_RW;
        u64 hcr_clear = 0;
-       u64 cptr_set = 0;
-       u64 cptr_clear = 0;
 
        /* Protected KVM does not support AArch32 guests. */
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
@@ -62,21 +60,10 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
        /* Trap AMU */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
                hcr_clear |= HCR_AMVOFFEN;
-               cptr_set |= CPTR_EL2_TAM;
-       }
-
-       /* Trap SVE */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
-               if (has_hvhe())
-                       cptr_clear |= CPACR_ELx_ZEN;
-               else
-                       cptr_set |= CPTR_EL2_TZ;
        }
 
        vcpu->arch.hcr_el2 |= hcr_set;
        vcpu->arch.hcr_el2 &= ~hcr_clear;
-       vcpu->arch.cptr_el2 |= cptr_set;
-       vcpu->arch.cptr_el2 &= ~cptr_clear;
 }
 
 /*
@@ -106,7 +93,6 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
        u64 mdcr_set = 0;
        u64 mdcr_clear = 0;
-       u64 cptr_set = 0;
 
        /* Trap/constrain PMU */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
@@ -133,21 +119,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
                mdcr_set |= MDCR_EL2_TTRF;
 
-       /* Trap Trace */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
-               if (has_hvhe())
-                       cptr_set |= CPACR_EL1_TTA;
-               else
-                       cptr_set |= CPTR_EL2_TTA;
-       }
-
        /* Trap External Trace */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
                mdcr_clear |= MDCR_EL2_E2TB_MASK;
 
        vcpu->arch.mdcr_el2 |= mdcr_set;
        vcpu->arch.mdcr_el2 &= ~mdcr_clear;
-       vcpu->arch.cptr_el2 |= cptr_set;
 }
 
 /*
@@ -198,10 +175,6 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
        /* Clear res0 and set res1 bits to trap potential new features. */
        vcpu->arch.hcr_el2 &= ~(HCR_RES0);
        vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
-       if (!has_hvhe()) {
-               vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
-               vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
-       }
 }
 
 static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
@@ -236,7 +209,6 @@ static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
  */
 static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
        vcpu->arch.mdcr_el2 = 0;
 
        pkvm_vcpu_reset_hcr(vcpu);
@@ -693,8 +665,6 @@ unlock:
                return ret;
        }
 
-       hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
-
        return 0;
 }
 
index cc69106734ca732ba9276ac1eaf84be3e7381648..81d933a71310fd1132b2450cd08108e071a2cf78 100644 (file)
@@ -36,33 +36,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
 
 extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
 
-static void __activate_traps(struct kvm_vcpu *vcpu)
+static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
 {
-       u64 val;
+       u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
 
-       ___activate_traps(vcpu, vcpu->arch.hcr_el2);
-       __activate_traps_common(vcpu);
+       if (has_hvhe()) {
+               val |= CPACR_ELx_TTA;
 
-       val = vcpu->arch.cptr_el2;
-       val |= CPTR_EL2_TAM;    /* Same bit irrespective of E2H */
-       val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
-       if (cpus_have_final_cap(ARM64_SME)) {
-               if (has_hvhe())
-                       val &= ~CPACR_ELx_SMEN;
-               else
-                       val |= CPTR_EL2_TSM;
-       }
+               if (guest_owns_fp_regs()) {
+                       val |= CPACR_ELx_FPEN;
+                       if (vcpu_has_sve(vcpu))
+                               val |= CPACR_ELx_ZEN;
+               }
+       } else {
+               val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
 
-       if (!guest_owns_fp_regs()) {
-               if (has_hvhe())
-                       val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
-               else
-                       val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+               /*
+                * Always trap SME since it's not supported in KVM.
+                * TSM is RES1 if SME isn't implemented.
+                */
+               val |= CPTR_EL2_TSM;
 
-               __activate_traps_fpsimd32(vcpu);
+               if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
+                       val |= CPTR_EL2_TZ;
+
+               if (!guest_owns_fp_regs())
+                       val |= CPTR_EL2_TFP;
        }
 
+       if (!guest_owns_fp_regs())
+               __activate_traps_fpsimd32(vcpu);
+
        kvm_write_cptr_el2(val);
+}
+
+static void __activate_traps(struct kvm_vcpu *vcpu)
+{
+       ___activate_traps(vcpu, vcpu->arch.hcr_el2);
+       __activate_traps_common(vcpu);
+       __activate_cptr_traps(vcpu);
+
        write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
 
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {