]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
arm64: Add override for MPAM
authorXi Ruoyao <xry111@xry111.site>
Mon, 2 Jun 2025 04:33:21 +0000 (12:33 +0800)
committerWill Deacon <will@kernel.org>
Mon, 2 Jun 2025 12:49:09 +0000 (13:49 +0100)
As the message of the commit 09e6b306f3ba ("arm64: cpufeature: discover
CPU support for MPAM") already states, if a buggy firmware fails to
either enable MPAM or emulate the trap as if it were disabled, the
kernel will just fail to boot.  While upgrading the firmware would be
the best solution, we have some hardware whose vendor has made no
response 2 months after we requested a firmware update.  Allow
overriding MPAM support so our devices don't become e-waste.

Cc: James Morse <james.morse@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Cc: Mingcong Bai <jeffbai@aosc.io>
Cc: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Cc: Ben Horgan <ben.horgan@arm.com>
Signed-off-by: Xi Ruoyao <xry111@xry111.site>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250602043723.216338-1-xry111@xry111.site
Signed-off-by: Will Deacon <will@kernel.org>
Documentation/admin-guide/kernel-parameters.txt
arch/arm64/include/asm/el2_setup.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/pi/idreg-override.c

index 76e538c77e31615c9cd6a650bb8a72ead14b8008..c16717dc5e5712e3e3ae5df01156e1e792c735af 100644 (file)
        arm64.nomops    [ARM64] Unconditionally disable Memory Copy and Memory
                        Set instructions support
 
+       arm64.nompam    [ARM64] Unconditionally disable Memory Partitioning And
+                       Monitoring support
+
        arm64.nomte     [ARM64] Unconditionally disable Memory Tagging Extension
                        support
 
index 30f57b0334a32ae7fda15523209a1f1c4e099c52..af7807f11df40d5dceedd2cb57fbf305cc9bfc66 100644 (file)
 .Lskip_gcs_\@:
 .endm
 
-.macro __init_el2_mpam
-       /* Memory Partitioning And Monitoring: disable EL2 traps */
-       mrs     x1, id_aa64pfr0_el1
-       ubfx    x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4
-       cbz     x0, .Lskip_mpam_\@              // skip if no MPAM
-       msr_s   SYS_MPAM2_EL2, xzr              // use the default partition
-                                               // and disable lower traps
-       mrs_s   x0, SYS_MPAMIDR_EL1
-       tbz     x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@  // skip if no MPAMHCR reg
-       msr_s   SYS_MPAMHCR_EL2, xzr            // clear TRAP_MPAMIDR_EL1 -> EL2
-.Lskip_mpam_\@:
-.endm
-
 /**
  * Initialize EL2 registers to sane values. This should be called early on all
  * cores that were booted in EL2. Note that everything gets initialised as
        __init_el2_stage2
        __init_el2_gicv3
        __init_el2_hstr
-       __init_el2_mpam
        __init_el2_nvhe_idregs
        __init_el2_cptr
        __init_el2_fgt
 #endif
 
 .macro finalise_el2_state
+       check_override id_aa64pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT, .Linit_mpam_\@, .Lskip_mpam_\@, x1, x2
+
+.Linit_mpam_\@:
+       msr_s   SYS_MPAM2_EL2, xzr              // use the default partition
+                                               // and disable lower traps
+       mrs_s   x0, SYS_MPAMIDR_EL1
+       tbz     x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@  // skip if no MPAMHCR reg
+       msr_s   SYS_MPAMHCR_EL2, xzr            // clear TRAP_MPAMIDR_EL1 -> EL2
+
+.Lskip_mpam_\@:
        check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
 
 .Linit_sve_\@: /* SVE register access */
index 379c82d22c75bfa628b4c257c3f62a2930594c32..fcc20d13e938234616c449e08893e150ddc9e4e5 100644 (file)
@@ -1198,8 +1198,10 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
                cpacr_restore(cpacr);
        }
 
-       if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
+       if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
+               info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
                init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr);
+       }
 
        if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
                init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
@@ -1452,7 +1454,8 @@ void update_cpu_features(int cpu,
                cpacr_restore(cpacr);
        }
 
-       if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) {
+       if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
+               info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
                taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu,
                                        info->reg_mpamidr, boot->reg_mpamidr);
        }
index 94525abd1c225c63e6a193916c1a8b47a27e77b4..c1f2b6b04b411ad3b310a0fc905e4dc594463082 100644 (file)
@@ -496,8 +496,11 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
                __cpuinfo_store_cpu_32bit(&info->aarch32);
 
-       if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
-               info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
+       /*
+        * info->reg_mpamidr deferred to {init,update}_cpu_features because we
+        * don't want to read it (and trigger a trap on buggy firmware) if
+        * using an aa64pfr0_el1 override to unconditionally disable MPAM.
+        */
 
        if (IS_ENABLED(CONFIG_ARM64_SME) &&
            id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
index c6b185b885f7002c74230643f03e231ce68d9618..bc57b290e5e7bab51a9de90d23fe36e1640e4b6b 100644 (file)
@@ -127,6 +127,7 @@ static const struct ftr_set_desc pfr0 __prel64_initconst = {
        .fields         = {
                FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter),
                FIELD("el0", ID_AA64PFR0_EL1_EL0_SHIFT, NULL),
+               FIELD("mpam", ID_AA64PFR0_EL1_MPAM_SHIFT, NULL),
                {}
        },
 };
@@ -154,6 +155,7 @@ static const struct ftr_set_desc pfr1 __prel64_initconst = {
                FIELD("gcs", ID_AA64PFR1_EL1_GCS_SHIFT, NULL),
                FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL),
                FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter),
+               FIELD("mpam_frac", ID_AA64PFR1_EL1_MPAM_frac_SHIFT, NULL),
                {}
        },
 };
@@ -246,6 +248,7 @@ static const struct {
        { "rodata=off",                 "arm64_sw.rodataoff=1" },
        { "arm64.nolva",                "id_aa64mmfr2.varange=0" },
        { "arm64.no32bit_el0",          "id_aa64pfr0.el0=1" },
+       { "arm64.nompam",               "id_aa64pfr0.mpam=0 id_aa64pfr1.mpam_frac=0" },
 };
 
 static int __init parse_hexdigit(const char *p, u64 *v)