FEAT_E2H0 is a formalisation of the existing behaviour of HCR_EL2.E2H
being programmable to switch between EL2 host mode and the
"traditional" nVHE EL2 mode. This implies at some point we might want
to model CPUs without FEAT_E2H0 which will always have EL2 host mode
enabled.
There are two values that represent systems without E2H0, of which
0b1110 additionally makes HCR_EL2.NV1 RES0 on FEAT_NV systems. For
FEAT_NV2 the NV1 bit is always valid.
Message-ID: <20260130181648.628364-1-alex.bennee@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Mohamed Mediouni <mohamed@unpredictable.fr>
Message-id: 20260205210231.888199-1-alex.bennee@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
- FEAT_DotProd (Advanced SIMD dot product instructions)
- FEAT_DoubleFault (Double Fault Extension)
- FEAT_E0PD (Preventing EL0 access to halves of address maps)
+- FEAT_E2H0 (Programming of HCR_EL2.E2H)
- FEAT_EBF16 (AArch64 Extended BFloat16 instructions)
- FEAT_ECV (Enhanced Counter Virtualization)
- FEAT_EL0 (Support for execution at EL0)
FIELD(ID_AA64MMFR3, SPEC_FPACC, 60, 4)
FIELD(ID_AA64MMFR4, ASID2, 8, 4)
+FIELD(ID_AA64MMFR4, E2H0, 24, 4)
FIELD(ID_AA64DFR0, DEBUGVER, 0, 4)
FIELD(ID_AA64DFR0, TRACEVER, 4, 4)
return FIELD_EX64_IDREG(id, ID_AA64MMFR4, ASID2) != 0;
}
+/*
+ * Note the E2H0 ID field is signed, becoming increasingly negative
+ * as less of the feature is implemented.
+ */
+static inline bool isar_feature_aa64_e2h0(const ARMISARegisters *id)
+{
+ return FIELD_SEX64_IDREG(id, ID_AA64MMFR4, E2H0) >= 0;
+}
+
+static inline bool isar_feature_aa64_nv1_res0(const ARMISARegisters *id)
+{
+ return FIELD_SEX64_IDREG(id, ID_AA64MMFR4, E2H0) <= -2;
+}
+
static inline bool isar_feature_aa64_mec(const ARMISARegisters *id)
{
return FIELD_EX64_IDREG(id, ID_AA64MMFR3, MEC) != 0;
}
if (arm_feature(env, ARM_FEATURE_AARCH64)) {
- if (cpu_isar_feature(aa64_vh, cpu)) {
+ if (cpu_isar_feature(aa64_vh, cpu) &&
+ cpu_isar_feature(aa64_e2h0, cpu)) {
valid_mask |= HCR_E2H;
}
if (cpu_isar_feature(aa64_ras, cpu)) {
valid_mask |= HCR_GPF;
}
if (cpu_isar_feature(aa64_nv, cpu)) {
- valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
+ valid_mask |= HCR_NV | HCR_AT;
+ if (!cpu_isar_feature(aa64_nv1_res0, cpu)) {
+ valid_mask |= HCR_NV1;
+ }
}
if (cpu_isar_feature(aa64_nv2, cpu)) {
valid_mask |= HCR_NV2;
/* Clear RES0 bits. */
value &= valid_mask;
- /* RW is RAO/WI if EL1 is AArch64 only */
- if (arm_feature(env, ARM_FEATURE_AARCH64) &&
- !cpu_isar_feature(aa64_aa32_el1, cpu)) {
- value |= HCR_RW;
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /* RW is RAO/WI if EL1 is AArch64 only */
+ if (!cpu_isar_feature(aa64_aa32_el1, cpu)) {
+ value |= HCR_RW;
+ }
+ /* Strictly E2H is RES1 unless FEAT_E2H0 relaxes the requirement */
+ if (!cpu_isar_feature(aa64_e2h0, cpu)) {
+ value |= HCR_E2H;
+ }
}
/*