arm64/sysreg: Get rid of CPACR_ELx SysregFields
author     Marc Zyngier <maz@kernel.org>
           Thu, 19 Dec 2024 17:33:50 +0000
committer  Will Deacon <will@kernel.org>
           Thu, 19 Dec 2024 18:00:58 +0000
There is no such thing as CPACR_ELx in the architecture.
What we have is CPACR_EL1, for which CPACR_EL12 is an accessor.

Rename CPACR_ELx_* to CPACR_EL1_*, and fix the bit of code using
these names.

Reviewed-by: Mark Brown <broonie@kernel.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241219173351.1123087-5-maz@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
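
For context: the renamed CPACR_EL1_FPEN/ZEN/SMEN constants are the full two-bit
enable fields of CPACR_EL1, numerically the OR of the per-EL enable bits that
already carry the CPACR_EL1_ prefix and still appear in the diff below (e.g.
CPACR_EL1_ZEN_EL1EN). A minimal illustrative sketch, not the generated
definitions themselves, with bit positions matching the CPACR_EL1 layout:

	/* illustrative only -- the real definitions come from the sysreg generation */
	#define CPACR_EL1_ZEN	(CPACR_EL1_ZEN_EL0EN  | CPACR_EL1_ZEN_EL1EN)	/* bits [17:16] */
	#define CPACR_EL1_FPEN	(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN)	/* bits [21:20] */
	#define CPACR_EL1_SMEN	(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)	/* bits [25:24] */

Setting both halves of a field disables the corresponding trap entirely, while
setting only the _EL1EN half (as kvm_get_reset_cptr_el2() does for ZEN on VHE)
leaves EL0 accesses trapped.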
13 files changed:
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_nested.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/tools/sysreg

diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 85ef966c08cd231a5872d2cf988470391ad2973e..cc39c57df7873945bcc796d564128b83c653495a 100644
 /* Coprocessor traps */
 .macro __init_el2_cptr
        __check_hvhe .LnVHE_\@, x1
-       mov     x0, #CPACR_ELx_FPEN
+       mov     x0, #CPACR_EL1_FPEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_\@
 .LnVHE_\@:
 
        // (h)VHE case
        mrs     x0, cpacr_el1                   // Disable SVE traps
-       orr     x0, x0, #CPACR_ELx_ZEN
+       orr     x0, x0, #CPACR_EL1_ZEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_\@
 
 
        // (h)VHE case
        mrs     x0, cpacr_el1                   // Disable SME traps
-       orr     x0, x0, #CPACR_ELx_SMEN
+       orr     x0, x0, #CPACR_EL1_SMEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_sme_\@
 
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3e0f0de1d2da86ed84177a3c6732697015875b60..43e365fbff0b2a659e8b11c1c8b66a1423b192e1 100644
        ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
        ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET)
 
-#define CPACR_EL1_TTA          (1 << 28)
-
 #define kvm_mode_names                         \
        { PSR_MODE_EL0t,        "EL0t" },       \
        { PSR_MODE_EL1t,        "EL1t" },       \
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index cf811009a33c9e36b65fd1e3a7ea8643d8a1a1c4..4f1d99725f6b3b6c14e110c5b6e967de9ae41812 100644
@@ -556,13 +556,13 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
        ({                                                              \
                u64 cptr = 0;                                           \
                                                                        \
-               if ((set) & CPACR_ELx_FPEN)                             \
+               if ((set) & CPACR_EL1_FPEN)                             \
                        cptr |= CPTR_EL2_TFP;                           \
-               if ((set) & CPACR_ELx_ZEN)                              \
+               if ((set) & CPACR_EL1_ZEN)                              \
                        cptr |= CPTR_EL2_TZ;                            \
-               if ((set) & CPACR_ELx_SMEN)                             \
+               if ((set) & CPACR_EL1_SMEN)                             \
                        cptr |= CPTR_EL2_TSM;                           \
-               if ((clr) & CPACR_ELx_TTA)                              \
+               if ((clr) & CPACR_EL1_TTA)                              \
                        cptr |= CPTR_EL2_TTA;                           \
                if ((clr) & CPTR_EL2_TAM)                               \
                        cptr |= CPTR_EL2_TAM;                           \
@@ -576,13 +576,13 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
        ({                                                              \
                u64 cptr = 0;                                           \
                                                                        \
-               if ((clr) & CPACR_ELx_FPEN)                             \
+               if ((clr) & CPACR_EL1_FPEN)                             \
                        cptr |= CPTR_EL2_TFP;                           \
-               if ((clr) & CPACR_ELx_ZEN)                              \
+               if ((clr) & CPACR_EL1_ZEN)                              \
                        cptr |= CPTR_EL2_TZ;                            \
-               if ((clr) & CPACR_ELx_SMEN)                             \
+               if ((clr) & CPACR_EL1_SMEN)                             \
                        cptr |= CPTR_EL2_TSM;                           \
-               if ((set) & CPACR_ELx_TTA)                              \
+               if ((set) & CPACR_EL1_TTA)                              \
                        cptr |= CPTR_EL2_TTA;                           \
                if ((set) & CPTR_EL2_TAM)                               \
                        cptr |= CPTR_EL2_TAM;                           \
@@ -595,13 +595,13 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
 #define cpacr_clear_set(clr, set)                                      \
        do {                                                            \
                BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);                \
-               BUILD_BUG_ON((clr) & CPACR_ELx_E0POE);                  \
-               __build_check_all_or_none((clr), CPACR_ELx_FPEN);       \
-               __build_check_all_or_none((set), CPACR_ELx_FPEN);       \
-               __build_check_all_or_none((clr), CPACR_ELx_ZEN);        \
-               __build_check_all_or_none((set), CPACR_ELx_ZEN);        \
-               __build_check_all_or_none((clr), CPACR_ELx_SMEN);       \
-               __build_check_all_or_none((set), CPACR_ELx_SMEN);       \
+               BUILD_BUG_ON((clr) & CPACR_EL1_E0POE);                  \
+               __build_check_all_or_none((clr), CPACR_EL1_FPEN);       \
+               __build_check_all_or_none((set), CPACR_EL1_FPEN);       \
+               __build_check_all_or_none((clr), CPACR_EL1_ZEN);        \
+               __build_check_all_or_none((set), CPACR_EL1_ZEN);        \
+               __build_check_all_or_none((clr), CPACR_EL1_SMEN);       \
+               __build_check_all_or_none((set), CPACR_EL1_SMEN);       \
                                                                        \
                if (has_vhe() || has_hvhe())                            \
                        sysreg_clear_set(cpacr_el1, clr, set);          \
@@ -624,16 +624,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
        u64 val;
 
        if (has_vhe()) {
-               val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
+               val = (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN);
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_EL1_SMEN_EL1EN;
        } else if (has_hvhe()) {
-               val = CPACR_ELx_FPEN;
+               val = CPACR_EL1_FPEN;
 
                if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
-                       val |= CPACR_ELx_ZEN;
+                       val |= CPACR_EL1_ZEN;
                if (cpus_have_final_cap(ARM64_SME))
-                       val |= CPACR_ELx_SMEN;
+                       val |= CPACR_EL1_SMEN;
        } else {
                val = CPTR_NVHE_EL2_RES1;
 
@@ -685,7 +685,7 @@ static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
 #define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)                           \
        (!vcpu_has_nv(vcpu) ? false :                                           \
         ____cptr_xen_trap_enabled(vcpu,                                        \
-                                  SYS_FIELD_GET(CPACR_ELx, xen,                \
+                                  SYS_FIELD_GET(CPACR_EL1, xen,                \
                                                 vcpu_sanitised_cptr_el2(vcpu))))
 
 static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
index 233e65522716441c95f2cf9ddbd56fc825e6bf72..6cd08198bf1957f13e8867d31f51506b33188554 100644 (file)
@@ -33,14 +33,14 @@ static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
 
 static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
 {
-       u64 cpacr_el1 = CPACR_ELx_RES1;
+       u64 cpacr_el1 = CPACR_EL1_RES1;
 
        if (cptr_el2 & CPTR_EL2_TTA)
-               cpacr_el1 |= CPACR_ELx_TTA;
+               cpacr_el1 |= CPACR_EL1_TTA;
        if (!(cptr_el2 & CPTR_EL2_TFP))
-               cpacr_el1 |= CPACR_ELx_FPEN;
+               cpacr_el1 |= CPACR_EL1_FPEN;
        if (!(cptr_el2 & CPTR_EL2_TZ))
-               cpacr_el1 |= CPACR_ELx_ZEN;
+               cpacr_el1 |= CPACR_EL1_ZEN;
 
        cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3229238a56acc8adc8d2fa9e99afb9da5838affc..13de0c7af053122379c922ee7e69a3dd41af850e 100644
@@ -2376,7 +2376,7 @@ static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused)
 static void cpu_enable_poe(const struct arm64_cpu_capabilities *__unused)
 {
        sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1_E0POE);
-       sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_E0POE);
+       sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_E0POE);
 }
 #endif
 
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 1ffbfd1c3cf2ecfa50284f8d4f54a22d06079ed2..f1b7287e1f3c317e547f650ccfb8c13dd863a9c0 100644
@@ -494,7 +494,7 @@ static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
        if (!vcpu_el2_e2h_is_set(vcpu))
                val = translate_cptr_el2_to_cpacr_el1(val);
 
-       if (val & CPACR_ELx_TTA)
+       if (val & CPACR_EL1_TTA)
                return BEHAVE_FORWARD_RW;
 
        return BEHAVE_HANDLE_LOCALLY;
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index ea5484ce1f3ba3121b6938bda15f7a8057d49051..98718bd65bf15edeef281da3e84a2f03480f070a 100644
@@ -169,7 +169,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
        if (has_vhe() && system_supports_sme()) {
                /* Also restore EL0 state seen on entry */
                if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
-                       sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
+                       sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_SMEN);
                else
                        sysreg_clear_set(CPACR_EL1,
                                         CPACR_EL1_SMEN_EL0EN,
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 34f53707892dfe7bba41620e7adb65f1f8376018..abfa6ad92e91e28c8710913997d196c784d2979b 100644
@@ -419,9 +419,9 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 
        /* First disable enough traps to allow us to update the registers */
        if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
-               cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+               cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN);
        else
-               cpacr_clear_set(0, CPACR_ELx_FPEN);
+               cpacr_clear_set(0, CPACR_EL1_FPEN);
        isb();
 
        /* Write out the host state if it's in the registers */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 6aa0b13d86e581a36ed529bcd932498045d2d6df..6c90ef6736d63eef1532f09fccb275da31000e88 100644
@@ -68,7 +68,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
        if (!guest_owns_fp_regs())
                return;
 
-       cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+       cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN);
        isb();
 
        if (vcpu_has_sve(vcpu))
@@ -481,7 +481,7 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
                handle_host_smc(host_ctxt);
                break;
        case ESR_ELx_EC_SVE:
-               cpacr_clear_set(0, CPACR_ELx_ZEN);
+               cpacr_clear_set(0, CPACR_EL1_ZEN);
                isb();
                sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
                                       SYS_ZCR_EL2);
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 01616c39a810777a123b6a3a5da33a16b6768ad7..a6638c4ecd32d54381361df9965790ceca4c41e3 100644
@@ -68,7 +68,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
        /* Trap SVE */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
                if (has_hvhe())
-                       cptr_clear |= CPACR_ELx_ZEN;
+                       cptr_clear |= CPACR_EL1_ZEN;
                else
                        cptr_set |= CPTR_EL2_TZ;
        }
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index cc69106734ca732ba9276ac1eaf84be3e7381648..0f6b01b3da5cfd4bfa7d75f901416abed0a5c7b2 100644
@@ -48,14 +48,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
        val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
        if (cpus_have_final_cap(ARM64_SME)) {
                if (has_hvhe())
-                       val &= ~CPACR_ELx_SMEN;
+                       val &= ~CPACR_EL1_SMEN;
                else
                        val |= CPTR_EL2_TSM;
        }
 
        if (!guest_owns_fp_regs()) {
                if (has_hvhe())
-                       val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+                       val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
                else
                        val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
 
@@ -192,7 +192,7 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 
                /* Re-enable SVE traps if not supported for the guest vcpu. */
                if (!vcpu_has_sve(vcpu))
-                       cpacr_clear_set(CPACR_ELx_ZEN, 0);
+                       cpacr_clear_set(CPACR_EL1_ZEN, 0);
 
        } else {
                __fpsimd_save_state(*host_data_ptr(fpsimd_state));
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 80581b1c399595fd64d0ccada498edac322480a6..59d992455793d2c570b60beadcb6a796546c6bf5 100644
@@ -77,12 +77,12 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
         * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
         * shift value for trapping the AMU accesses.
         */
-       u64 val = CPACR_ELx_TTA | CPTR_EL2_TAM;
+       u64 val = CPACR_EL1_TTA | CPTR_EL2_TAM;
 
        if (guest_owns_fp_regs()) {
-               val |= CPACR_ELx_FPEN;
+               val |= CPACR_EL1_FPEN;
                if (vcpu_has_sve(vcpu))
-                       val |= CPACR_ELx_ZEN;
+                       val |= CPACR_EL1_ZEN;
        } else {
                __activate_traps_fpsimd32(vcpu);
        }
@@ -122,13 +122,13 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
         * hypervisor has traps enabled to dispel any illusion of something more
         * complicated taking place.
         */
-       if (!(SYS_FIELD_GET(CPACR_ELx, FPEN, cptr) & BIT(0)))
-               val &= ~CPACR_ELx_FPEN;
-       if (!(SYS_FIELD_GET(CPACR_ELx, ZEN, cptr) & BIT(0)))
-               val &= ~CPACR_ELx_ZEN;
+       if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
+               val &= ~CPACR_EL1_FPEN;
+       if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
+               val &= ~CPACR_EL1_ZEN;
 
        if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
-               val |= cptr & CPACR_ELx_E0POE;
+               val |= cptr & CPACR_EL1_E0POE;
 
        val |= cptr & CPTR_EL2_TCPAC;
 
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 899526e16a4cd2759bdbf1e0dc9d444741e6c2d7..40a9e4e2cae6909b5021ce1498c43e6f5ddb4a63 100644
@@ -1986,7 +1986,7 @@ Field     1       A
 Field  0       M
 EndSysreg
 
-SysregFields   CPACR_ELx
+Sysreg CPACR_EL1       3       0       1       0       2
 Res0   63:30
 Field  29      E0POE
 Field  28      TTA
@@ -1997,10 +1997,6 @@ Field    21:20   FPEN
 Res0   19:18
 Field  17:16   ZEN
 Res0   15:0
-EndSysregFields
-
-Sysreg CPACR_EL1       3       0       1       0       2
-Fields CPACR_ELx
 EndSysreg
 
 Sysreg SMPRI_EL1       3       0       1       2       4
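
The tools/sysreg hunk above is what makes the CPACR_EL1_* names exist at all:
the build feeds this description to gen-sysreg.awk to emit the register and
field macros. A hedged sketch of the kind of output the new Sysreg CPACR_EL1
entry produces -- the 3/0/1/0/2 encoding and the visible fields come from the
description above, but the exact macro spellings (the _SHIFT variants, the RES0
helper) are assumptions, not copied from the generated header:

	/* illustrative sketch of the generated definitions, not verbatim output */
	#define SYS_CPACR_EL1		sys_reg(3, 0, 1, 0, 2)

	#define CPACR_EL1_E0POE		BIT(29)
	#define CPACR_EL1_TTA		BIT(28)
	#define CPACR_EL1_SMEN		GENMASK(25, 24)
	#define CPACR_EL1_FPEN		GENMASK(21, 20)
	#define CPACR_EL1_ZEN		GENMASK(17, 16)
	#define CPACR_EL1_RES0		(GENMASK_ULL(63, 30) | GENMASK(19, 18) | GENMASK(15, 0))

Previously the same fields were emitted under the fictitious CPACR_ELx prefix
via the shared SysregFields block and then reused for CPACR_EL1; dropping that
indirection is exactly what lets the rest of the patch use CPACR_EL1_* directly.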