KVM: arm64: Refactor CPTR trap deactivation
author    Mark Rutland <mark.rutland@arm.com>
          Mon, 10 Feb 2025 19:52:23 +0000 (19:52 +0000)
committer Marc Zyngier <maz@kernel.org>
          Thu, 13 Feb 2025 17:54:57 +0000 (17:54 +0000)
For historical reasons, the VHE and nVHE/hVHE implementations of
__activate_cptr_traps() pair with a common implementation of
__kvm_reset_cptr_el2(), which ideally would be named
__deactivate_cptr_traps().

Rename __kvm_reset_cptr_el2() to __deactivate_cptr_traps(), and split it
into separate VHE and nVHE/hVHE variants so that each can be paired with
its corresponding implementation of __activate_cptr_traps().

At the same time, fold kvm_write_cptr_el2() into its callers. This
makes it clear in-context whether a write is made to the CPACR_EL1
encoding or the CPTR_EL2 encoding, and removes the possibility of
confusion as to whether kvm_write_cptr_el2() reformats the sysreg fields
as cpacr_clear_set() does.
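
For context, the distinction is roughly the following: kvm_write_cptr_el2() (removed in the first hunk below) writes the caller's value verbatim, so the caller must have built it in the encoding of whichever register is live, whereas cpacr_clear_set() always takes CPACR_EL1-format bits and, on nVHE, translates them into the CPTR_EL2 encoding via the __cpacr_to_cptr_*() helpers. A simplified sketch of the two (the real cpacr_clear_set() macro also carries build-time sanity checks, omitted here):

/* Sketch only: write the caller-provided value verbatim. */
static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);	/* val already in CPACR_EL1 format */
	else
		write_sysreg(val, cptr_el2);	/* val already in CPTR_EL2 format */
}

/* Sketch only: take CPACR_EL1-format bits and reformat them for nVHE. */
#define cpacr_clear_set(clr, set)					\
	do {								\
		if (has_vhe() || has_hvhe())				\
			sysreg_clear_set(cpacr_el1, clr, set);		\
		else							\
			sysreg_clear_set(cptr_el2,			\
					 __cpacr_to_cptr_clr(clr, set),	\
					 __cpacr_to_cptr_set(clr, set));\
	} while (0)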

In the nVHE/hVHE implementation of __activate_cptr_traps(), placing the
sysreg writes within the if-else blocks requires that the call to
__activate_traps_fpsimd32() is moved earlier, but as this was always
called before writing to CPTR_EL2/CPACR_EL1, this should not result in a
functional change.
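
Condensed from the nVHE/hVHE hunks below, with the mode-specific trap-bit selection elided to comments, the reordering amounts to the following sketch:

/* Before: one trailing write, with the FPSIMD32 call just ahead of it. */
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_EL2_TAM;

	/* build the remaining hVHE (CPACR_EL1) or nVHE (CPTR_EL2) trap bits */

	if (!guest_owns_fp_regs())
		__activate_traps_fpsimd32(vcpu);

	kvm_write_cptr_el2(val);
}

/* After: the FPSIMD32 call moves to the top so each branch can end with
 * its own sysreg write; it still runs before CPTR_EL2/CPACR_EL1 is written. */
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_EL2_TAM;

	if (!guest_owns_fp_regs())
		__activate_traps_fpsimd32(vcpu);

	if (has_hvhe()) {
		/* build CPACR_EL1-format trap bits */
		write_sysreg(val, cpacr_el1);
	} else {
		/* build CPTR_EL2-format trap bits */
		write_sysreg(val, cptr_el2);
	}
}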

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250210195226.1215254-6-mark.rutland@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 47f2cf408eeda61a206c58e11169103c6187a5c9..78ec1ef2cfe82ac31b19cc26ece0d94cea19b46c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -605,48 +605,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
                                         __cpacr_to_cptr_set(clr, set));\
        } while (0)
 
-static __always_inline void kvm_write_cptr_el2(u64 val)
-{
-       if (has_vhe() || has_hvhe())
-               write_sysreg(val, cpacr_el1);
-       else
-               write_sysreg(val, cptr_el2);
-}
-
-/* Resets the value of cptr_el2 when returning to the host. */
-static __always_inline void __kvm_reset_cptr_el2(struct kvm *kvm)
-{
-       u64 val;
-
-       if (has_vhe()) {
-               val = (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN);
-               if (cpus_have_final_cap(ARM64_SME))
-                       val |= CPACR_EL1_SMEN_EL1EN;
-       } else if (has_hvhe()) {
-               val = CPACR_EL1_FPEN;
-
-               if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
-                       val |= CPACR_EL1_ZEN;
-               if (cpus_have_final_cap(ARM64_SME))
-                       val |= CPACR_EL1_SMEN;
-       } else {
-               val = CPTR_NVHE_EL2_RES1;
-
-               if (kvm_has_sve(kvm) && guest_owns_fp_regs())
-                       val |= CPTR_EL2_TZ;
-               if (!cpus_have_final_cap(ARM64_SME))
-                       val |= CPTR_EL2_TSM;
-       }
-
-       kvm_write_cptr_el2(val);
-}
-
-#ifdef __KVM_NVHE_HYPERVISOR__
-#define kvm_reset_cptr_el2(v)  __kvm_reset_cptr_el2(kern_hyp_va((v)->kvm))
-#else
-#define kvm_reset_cptr_el2(v)  __kvm_reset_cptr_el2((v)->kvm)
-#endif
-
 /*
  * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
  * format if E2H isn't set.
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 7a2d189176249703e3f3e3accdaba1e4f40b2fbc..5d79f63a4f86143a742b5a14be189f052a9072c5 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -39,6 +39,9 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
 {
        u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
 
+       if (!guest_owns_fp_regs())
+               __activate_traps_fpsimd32(vcpu);
+
        if (has_hvhe()) {
                val |= CPACR_EL1_TTA;
 
@@ -47,6 +50,8 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
                        if (vcpu_has_sve(vcpu))
                                val |= CPACR_EL1_ZEN;
                }
+
+               write_sysreg(val, cpacr_el1);
        } else {
                val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
 
@@ -61,12 +66,34 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
 
                if (!guest_owns_fp_regs())
                        val |= CPTR_EL2_TFP;
+
+               write_sysreg(val, cptr_el2);
        }
+}
 
-       if (!guest_owns_fp_regs())
-               __activate_traps_fpsimd32(vcpu);
+static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 
-       kvm_write_cptr_el2(val);
+       if (has_hvhe()) {
+               u64 val = CPACR_EL1_FPEN;
+
+               if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
+                       val |= CPACR_EL1_ZEN;
+               if (cpus_have_final_cap(ARM64_SME))
+                       val |= CPACR_EL1_SMEN;
+
+               write_sysreg(val, cpacr_el1);
+       } else {
+               u64 val = CPTR_NVHE_EL2_RES1;
+
+               if (kvm_has_sve(kvm) && guest_owns_fp_regs())
+                       val |= CPTR_EL2_TZ;
+               if (!cpus_have_final_cap(ARM64_SME))
+                       val |= CPTR_EL2_TSM;
+
+               write_sysreg(val, cptr_el2);
+       }
 }
 
 static void __activate_traps(struct kvm_vcpu *vcpu)
@@ -119,7 +146,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 
        write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
-       kvm_reset_cptr_el2(vcpu);
+       __deactivate_cptr_traps(vcpu);
        write_sysreg(__kvm_hyp_host_vector, vbar_el2);
 }
 
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index e8a07d4bb546b324fc8216a8fc4c46e9466239f0..4748b1947ffa02ead251e2e7105d298c483ca888 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -136,6 +136,16 @@ write:
        write_sysreg(val, cpacr_el1);
 }
 
+static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
+{
+       u64 val = CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN;
+
+       if (cpus_have_final_cap(ARM64_SME))
+               val |= CPACR_EL1_SMEN_EL1EN;
+
+       write_sysreg(val, cpacr_el1);
+}
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
        u64 val;
@@ -207,7 +217,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
         */
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 
-       kvm_reset_cptr_el2(vcpu);
+       __deactivate_cptr_traps(vcpu);
 
        if (!arm64_kernel_unmapped_at_el0())
                host_vectors = __this_cpu_read(this_cpu_vector);