KVM: arm64: Convert SCTLR_EL1 to config-driven sanitisation
author    Marc Zyngier <maz@kernel.org>
          Mon, 14 Jul 2025 11:55:01 +0000 (12:55 +0100)
committer Oliver Upton <oliver.upton@linux.dev>
          Wed, 16 Jul 2025 03:39:42 +0000 (20:39 -0700)
As for other registers, convert the determination of the RES0 bits
affecting SCTLR_EL1 to be driven by a table extracted from the 2025-06
JSON drop.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250714115503.3334242-4-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/config.c
arch/arm64/kvm/nested.c
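
The conversion replaces open-coded RES0 conditionals with a lookup over a
bits-to-feature table: each entry pairs a group of SCTLR_EL1 control bits
with the ID-register feature that gates them, and bits whose feature the
guest configuration lacks are folded into the RES0 mask. A minimal sketch
of that pattern, using simplified hypothetical names (the in-tree version
is the NEEDS_FEAT()/compute_res0_bits() machinery in
arch/arm64/kvm/config.c, shown in the diff below):

/*
 * Hypothetical, simplified sketch of the table-driven scheme; the real
 * code uses struct reg_bits_to_feat_map and the NEEDS_FEAT() helpers.
 */
struct bits_to_feat {
	u64 bits;			/* register bits gated by the feature */
	bool (*match)(struct kvm *kvm);	/* true if the guest has the feature */
};

static u64 compute_res0(struct kvm *kvm,
			const struct bits_to_feat *map, int nr)
{
	u64 res0 = 0;

	for (int i = 0; i < nr; i++) {
		/* a bit whose gating feature is absent becomes RES0 */
		if (!map[i].match(kvm))
			res0 |= map[i].bits;
	}

	return res0;
}

Adding coverage for a new control bit then amounts to a new table entry
rather than another open-coded conditional, which is what the nested.c
hunk at the bottom deletes for SCTLR_EL1_EPAN.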

diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
index 8265b722d442b5ae9a6dc9cfcc6b311285a1f4ab..540308429ffe77ea4d61a2e0925ef9f6cc5e788c 100644
--- a/arch/arm64/kvm/config.c
+++ b/arch/arm64/kvm/config.c
@@ -134,6 +134,20 @@ struct reg_bits_to_feat_map {
 #define FEAT_ASID2             ID_AA64MMFR4_EL1, ASID2, IMP
 #define FEAT_MEC               ID_AA64MMFR3_EL1, MEC, IMP
 #define FEAT_HAFT              ID_AA64MMFR1_EL1, HAFDBS, HAFT
+#define FEAT_BTI               ID_AA64PFR1_EL1, BT, IMP
+#define FEAT_ExS               ID_AA64MMFR0_EL1, EXS, IMP
+#define FEAT_IESB              ID_AA64MMFR2_EL1, IESB, IMP
+#define FEAT_LSE2              ID_AA64MMFR2_EL1, AT, IMP
+#define FEAT_LSMAOC            ID_AA64MMFR2_EL1, LSM, IMP
+#define FEAT_MixedEnd          ID_AA64MMFR0_EL1, BIGEND, IMP
+#define FEAT_MixedEndEL0       ID_AA64MMFR0_EL1, BIGENDEL0, IMP
+#define FEAT_MTE2              ID_AA64PFR1_EL1, MTE, MTE2
+#define FEAT_MTE_ASYNC         ID_AA64PFR1_EL1, MTE_frac, ASYNC
+#define FEAT_MTE_STORE_ONLY    ID_AA64PFR2_EL1, MTESTOREONLY, IMP
+#define FEAT_PAN               ID_AA64MMFR1_EL1, PAN, IMP
+#define FEAT_PAN3              ID_AA64MMFR1_EL1, PAN, PAN3
+#define FEAT_SSBS              ID_AA64PFR1_EL1, SSBS, IMP
+#define FEAT_TIDCP1            ID_AA64MMFR1_EL1, TIDCP1, IMP
 
 static bool not_feat_aa64el3(struct kvm *kvm)
 {
@@ -241,6 +255,16 @@ static bool feat_ebep_pmuv3_ss(struct kvm *kvm)
        return kvm_has_feat(kvm, FEAT_EBEP) || kvm_has_feat(kvm, FEAT_PMUv3_SS);
 }
 
+static bool feat_mixedendel0(struct kvm *kvm)
+{
+       return kvm_has_feat(kvm, FEAT_MixedEnd) || kvm_has_feat(kvm, FEAT_MixedEndEL0);
+}
+
+static bool feat_mte_async(struct kvm *kvm)
+{
+       return kvm_has_feat(kvm, FEAT_MTE2) && kvm_has_feat_enum(kvm, FEAT_MTE_ASYNC);
+}
+
 static bool compute_hcr_rw(struct kvm *kvm, u64 *bits)
 {
        /* This is purely academic: AArch32 and NV are mutually exclusive */
@@ -872,6 +896,80 @@ static const struct reg_bits_to_feat_map tcr2_el2_feat_map[] = {
        NEEDS_FEAT(TCR2_EL2_PIE, FEAT_S1PIE),
 };
 
+static const struct reg_bits_to_feat_map sctlr_el1_feat_map[] = {
+       NEEDS_FEAT(SCTLR_EL1_CP15BEN    |
+                  SCTLR_EL1_ITD        |
+                  SCTLR_EL1_SED,
+                  FEAT_AA32EL0),
+       NEEDS_FEAT(SCTLR_EL1_BT0        |
+                  SCTLR_EL1_BT1,
+                  FEAT_BTI),
+       NEEDS_FEAT(SCTLR_EL1_CMOW, FEAT_CMOW),
+       NEEDS_FEAT(SCTLR_EL1_TSCXT, feat_csv2_2_csv2_1p2),
+       NEEDS_FEAT(SCTLR_EL1_EIS        |
+                  SCTLR_EL1_EOS,
+                  FEAT_ExS),
+       NEEDS_FEAT(SCTLR_EL1_EnFPM, FEAT_FPMR),
+       NEEDS_FEAT(SCTLR_EL1_IESB, FEAT_IESB),
+       NEEDS_FEAT(SCTLR_EL1_EnALS, FEAT_LS64),
+       NEEDS_FEAT(SCTLR_EL1_EnAS0, FEAT_LS64_ACCDATA),
+       NEEDS_FEAT(SCTLR_EL1_EnASR, FEAT_LS64_V),
+       NEEDS_FEAT(SCTLR_EL1_nAA, FEAT_LSE2),
+       NEEDS_FEAT(SCTLR_EL1_LSMAOE     |
+                  SCTLR_EL1_nTLSMD,
+                  FEAT_LSMAOC),
+       NEEDS_FEAT(SCTLR_EL1_EE, FEAT_MixedEnd),
+       NEEDS_FEAT(SCTLR_EL1_E0E, feat_mixedendel0),
+       NEEDS_FEAT(SCTLR_EL1_MSCEn, FEAT_MOPS),
+       NEEDS_FEAT(SCTLR_EL1_ATA0       |
+                  SCTLR_EL1_ATA        |
+                  SCTLR_EL1_TCF0       |
+                  SCTLR_EL1_TCF,
+                  FEAT_MTE2),
+       NEEDS_FEAT(SCTLR_EL1_ITFSB, feat_mte_async),
+       NEEDS_FEAT(SCTLR_EL1_TCSO0      |
+                  SCTLR_EL1_TCSO,
+                  FEAT_MTE_STORE_ONLY),
+       NEEDS_FEAT(SCTLR_EL1_NMI        |
+                  SCTLR_EL1_SPINTMASK,
+                  FEAT_NMI),
+       NEEDS_FEAT(SCTLR_EL1_SPAN, FEAT_PAN),
+       NEEDS_FEAT(SCTLR_EL1_EPAN, FEAT_PAN3),
+       NEEDS_FEAT(SCTLR_EL1_EnDA       |
+                  SCTLR_EL1_EnDB       |
+                  SCTLR_EL1_EnIA       |
+                  SCTLR_EL1_EnIB,
+                  feat_pauth),
+       NEEDS_FEAT(SCTLR_EL1_EnTP2, FEAT_SME),
+       NEEDS_FEAT(SCTLR_EL1_EnRCTX, FEAT_SPECRES),
+       NEEDS_FEAT(SCTLR_EL1_DSSBS, FEAT_SSBS),
+       NEEDS_FEAT(SCTLR_EL1_TIDCP, FEAT_TIDCP1),
+       NEEDS_FEAT(SCTLR_EL1_TME0       |
+                  SCTLR_EL1_TME        |
+                  SCTLR_EL1_TMT0       |
+                  SCTLR_EL1_TMT,
+                  FEAT_TME),
+       NEEDS_FEAT(SCTLR_EL1_TWEDEL     |
+                  SCTLR_EL1_TWEDEn,
+                  FEAT_TWED),
+       NEEDS_FEAT(SCTLR_EL1_UCI        |
+                  SCTLR_EL1_EE         |
+                  SCTLR_EL1_E0E        |
+                  SCTLR_EL1_WXN        |
+                  SCTLR_EL1_nTWE       |
+                  SCTLR_EL1_nTWI       |
+                  SCTLR_EL1_UCT        |
+                  SCTLR_EL1_DZE        |
+                  SCTLR_EL1_I          |
+                  SCTLR_EL1_UMA        |
+                  SCTLR_EL1_SA0        |
+                  SCTLR_EL1_SA         |
+                  SCTLR_EL1_C          |
+                  SCTLR_EL1_A          |
+                  SCTLR_EL1_M,
+                  FEAT_AA64EL1),
+};
+
 static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
                                  int map_size, u64 res0, const char *str)
 {
@@ -905,6 +1003,8 @@ void __init check_feature_map(void)
                       HCR_EL2_RES0, "HCR_EL2");
        check_feat_map(tcr2_el2_feat_map, ARRAY_SIZE(tcr2_el2_feat_map),
                       TCR2_EL2_RES0, "TCR2_EL2");
+       check_feat_map(sctlr_el1_feat_map, ARRAY_SIZE(sctlr_el1_feat_map),
+                      SCTLR_EL1_RES0, "SCTLR_EL1");
 }
 
 static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map)
@@ -1125,6 +1225,12 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
                *res0 |= TCR2_EL2_RES0;
                *res1 = TCR2_EL2_RES1;
                break;
+       case SCTLR_EL1:
+               *res0 = compute_res0_bits(kvm, sctlr_el1_feat_map,
+                                         ARRAY_SIZE(sctlr_el1_feat_map), 0, 0);
+               *res0 |= SCTLR_EL1_RES0;
+               *res1 = SCTLR_EL1_RES1;
+               break;
        default:
                WARN_ON_ONCE(1);
                *res0 = *res1 = 0;
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index efb1f2caca6268b8fe374ab939fb6aa2731697bf..bca4b5d4b9898b1492ca468186713271a4e92258 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -1667,10 +1667,7 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
        set_sysreg_masks(kvm, TCR2_EL2, res0, res1);
 
        /* SCTLR_EL1 */
-       res0 = SCTLR_EL1_RES0;
-       res1 = SCTLR_EL1_RES1;
-       if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN3))
-               res0 |= SCTLR_EL1_EPAN;
+       get_reg_fixed_bits(kvm, SCTLR_EL1, &res0, &res1);
        set_sysreg_masks(kvm, SCTLR_EL1, res0, res1);
 
        /* MDCR_EL2 */