* table over and over.
* 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
* Never (PAN) bit within PSTATE.
+ * 7. we fold together the secure and non-secure regimes for A-profile,
+ * because there are no banked system registers for AArch64, so the
+ * process of switching between secure and non-secure is already
+ * heavyweight.
*
* This gives us the following list of cases:
*
- * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
- * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
- * NS EL1 EL1&0 stage 1+2 +PAN
- * NS EL0 EL2&0
- * NS EL2 EL2&0
- * NS EL2 EL2&0 +PAN
- * NS EL2 (aka NS PL2)
- * S EL0 EL1&0 (aka S PL0)
- * S EL1 EL1&0 (not used if EL3 is 32 bit)
- * S EL1 EL1&0 +PAN
- * S EL3 (aka S PL1)
+ * EL0 EL1&0 stage 1+2 (aka NS PL0)
+ * EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * EL1 EL1&0 stage 1+2 +PAN
+ * EL0 EL2&0
+ * EL2 EL2&0
+ * EL2 EL2&0 +PAN
+ * EL2 (aka NS PL2)
+ * EL3 (aka S PL1)
*
- * for a total of 11 different mmu_idx.
+ * for a total of 8 different mmu_idx.
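+ *
+ * As an illustration of case 7 above: code running at Secure EL1 and
+ * at Non-secure EL1 now share ARMMMUIdx_E10_1, so a change of
+ * SCR_EL3.NS can no longer select a different mmu_idx and must
+ * instead flush the shared TLB entries (see the flush added to
+ * scr_write() later in this patch).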
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
- * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
- * NS EL2 if we ever model a Cortex-R52).
+ * as A profile. They only need to distinguish EL0 and EL1 (and
+ * EL2 if we ever model a Cortex-R52).
*
* M profile CPUs are rather different as they do not have a true MMU.
* They have the following different MMU indexes:
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
#define ARM_MMU_IDX_M 0x40 /* M profile */
-/* Meanings of the bits for A profile mmu idx values */
-#define ARM_MMU_IDX_A_NS 0x8
-
/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV 0x1
#define ARM_MMU_IDX_M_NEGPRI 0x2
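+/*
+ * Illustrative example (not a new definition): with the flag bits
+ * above, an M-profile privileged index is composed as
+ *     ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV
+ * and generic code can test e.g. (mmu_idx & ARM_MMU_IDX_M) to
+ * classify an index without enumerating every value.
+ */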
/*
* A-profile.
*/
- ARMMMUIdx_SE10_0 = 0 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE20_0 = 1 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE10_1 = 2 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE20_2 = 3 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE10_1_PAN = 4 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE20_2_PAN = 5 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE2 = 6 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
-
- ARMMMUIdx_E10_0 = ARMMMUIdx_SE10_0 | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E20_0 = ARMMMUIdx_SE20_0 | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E10_1 = ARMMMUIdx_SE10_1 | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E20_2 = ARMMMUIdx_SE20_2 | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E10_1_PAN = ARMMMUIdx_SE10_1_PAN | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E20_2_PAN = ARMMMUIdx_SE20_2_PAN | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E2 = ARMMMUIdx_SE2 | ARM_MMU_IDX_A_NS,
+ ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
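+
+ /*
+ * Because the eight values above occupy exactly the low bits 0..7,
+ * converting a core TCG mmu_idx back to an A-profile ARMMMUIdx is,
+ * as a sketch (cf. core_to_arm_mmu_idx()):
+ *     return mmu_idx | ARM_MMU_IDX_A;
+ */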
/*
* These are not allocated TLBs and are used only for AT system
ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_SE0 = 3 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_SE1 = 4 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_SE1_PAN = 5 | ARM_MMU_IDX_NOTLB,
/*
* Not allocated a TLB: used only for second stage of an S12 page
* table walk, or for descriptor loads during first stage of an S1
* then various TLB flush insns which currently are no-ops or flush
* only stage 1 MMU indexes will need to change to flush stage 2.
*/
- ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage2_S = 7 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage2 = 3 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage2_S = 4 | ARM_MMU_IDX_NOTLB,
/*
* M-profile.
TO_CORE_BIT(E2),
TO_CORE_BIT(E20_2),
TO_CORE_BIT(E20_2_PAN),
- TO_CORE_BIT(SE10_0),
- TO_CORE_BIT(SE20_0),
- TO_CORE_BIT(SE10_1),
- TO_CORE_BIT(SE20_2),
- TO_CORE_BIT(SE10_1_PAN),
- TO_CORE_BIT(SE20_2_PAN),
- TO_CORE_BIT(SE2),
- TO_CORE_BIT(SE3),
+ TO_CORE_BIT(E3),
TO_CORE_BIT(MUser),
TO_CORE_BIT(MPriv),
/* Begin with base v8.0 state. */
uint64_t valid_mask = 0x3fff;
ARMCPU *cpu = env_archcpu(env);
+ uint64_t changed;
/*
* Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
/* Clear all-context RES0 bits. */
value &= valid_mask;
- raw_write(env, ri, value);
+ changed = env->cp15.scr_el3 ^ value;
+ env->cp15.scr_el3 = value;
+
+ /*
+ * If SCR_EL3.NS changes, i.e. the result of arm_is_secure_below_el3()
+ * changes, then we must invalidate all TLBs below EL3.
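+ * (EL3's own translation regime does not depend on SCR_EL3.NS, so
+ * ARMMMUIdxBit_E3 does not need to be flushed.)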
+ */
+ if (changed & SCR_NS) {
+ tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E2));
+ }
}
static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE20_0:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
return GTIMER_HYP;
default:
return GTIMER_PHYS;
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE20_0:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
return GTIMER_HYPVIRT;
default:
return GTIMER_VIRT;
/* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
switch (el) {
case 3:
- mmu_idx = ARMMMUIdx_SE3;
+ mmu_idx = ARMMMUIdx_E3;
secure = true;
break;
case 2:
/* fall through */
case 1:
if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
- mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
- : ARMMMUIdx_Stage1_E1_PAN);
+ mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
} else {
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+ mmu_idx = ARMMMUIdx_Stage1_E1;
}
break;
default:
/* stage 1 current state PL0: ATS1CUR, ATS1CUW */
switch (el) {
case 3:
- mmu_idx = ARMMMUIdx_SE10_0;
+ mmu_idx = ARMMMUIdx_E10_0;
secure = true;
break;
case 2:
mmu_idx = ARMMMUIdx_Stage1_E0;
break;
case 1:
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+ mmu_idx = ARMMMUIdx_Stage1_E0;
break;
default:
g_assert_not_reached();
switch (ri->opc1) {
case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
- mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
- : ARMMMUIdx_Stage1_E1_PAN);
+ mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
} else {
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+ mmu_idx = ARMMMUIdx_Stage1_E1;
}
break;
case 4: /* AT S1E2R, AT S1E2W */
- mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
+ mmu_idx = ARMMMUIdx_E2;
break;
case 6: /* AT S1E3R, AT S1E3W */
- mmu_idx = ARMMMUIdx_SE3;
+ mmu_idx = ARMMMUIdx_E3;
secure = true;
break;
default:
}
break;
case 2: /* AT S1E0R, AT S1E0W */
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+ mmu_idx = ARMMMUIdx_Stage1_E0;
break;
case 4: /* AT S12E1R, AT S12E1W */
- mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
+ mmu_idx = ARMMMUIdx_E10_1;
break;
case 6: /* AT S12E0R, AT S12E0W */
- mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
+ mmu_idx = ARMMMUIdx_E10_0;
break;
default:
g_assert_not_reached();
uint16_t mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
ARMMMUIdxBit_E20_0;
-
- if (arm_is_secure_below_el3(env)) {
- mask >>= ARM_MMU_IDX_A_NS;
- }
-
tlb_flush_by_mmuidx(env_cpu(env), mask);
}
raw_write(env, ri, value);
uint16_t mask = ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0;
-
- if (arm_is_secure_below_el3(env)) {
- mask >>= ARM_MMU_IDX_A_NS;
- }
-
tlb_flush_by_mmuidx(cs, mask);
raw_write(env, ri, value);
}
ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0;
}
-
- if (arm_is_secure_below_el3(env)) {
- mask >>= ARM_MMU_IDX_A_NS;
- }
-
return mask;
}
mmu_idx = ARMMMUIdx_E10_0;
}
- if (arm_is_secure_below_el3(env)) {
- mmu_idx &= ~ARM_MMU_IDX_A_NS;
- }
-
return tlbbits_for_regime(env, mmu_idx, addr);
}
* stage 2 translations, whereas most other scopes only invalidate
* stage 1 translations.
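+ *
+ * The combined stage 1+2 translations are cached under the E10
+ * indexes, and ARMMMUIdx_Stage2 is not allocated a TLB (see the
+ * comment in cpu.h), which is why no separate stage 2 bits appear
+ * in the mask returned below.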
*/
- if (arm_is_secure_below_el3(env)) {
- return ARMMMUIdxBit_SE10_1 |
- ARMMMUIdxBit_SE10_1_PAN |
- ARMMMUIdxBit_SE10_0;
- } else {
- return ARMMMUIdxBit_E10_1 |
- ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0;
- }
+ return (ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0);
}
static int e2_tlbmask(CPUARMState *env)
{
- if (arm_is_secure_below_el3(env)) {
- return ARMMMUIdxBit_SE20_0 |
- ARMMMUIdxBit_SE20_2 |
- ARMMMUIdxBit_SE20_2_PAN |
- ARMMMUIdxBit_SE2;
- } else {
- return ARMMMUIdxBit_E20_0 |
- ARMMMUIdxBit_E20_2 |
- ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2;
- }
+ return (ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E2);
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- bool secure = arm_is_secure_below_el3(env);
- int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
- int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
- pageaddr);
+ int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
- tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ ARMMMUIdxBit_E2, bits);
}
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
+ int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_SE3, bits);
+ ARMMMUIdxBit_E3, bits);
}
#ifdef TARGET_AARCH64
static int vae2_tlbmask(CPUARMState *env)
{
- return (arm_is_secure_below_el3(env)
- ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
+ return ARMMMUIdxBit_E2;
}
static void tlbi_aa64_rvae2_write(CPUARMState *env,
* flush-last-level-only.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_SE3,
- tlb_force_broadcast(env));
+ do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
}
static void tlbi_aa64_rvae3is_write(CPUARMState *env,
* flush-last-level-only or inner/outer specific flushes.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
+ do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
}
#endif
/* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
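+ /*
+ * E.g. EL0 with HCR_EL2.<E2H,TGE> == '11' runs in the EL2&0 regime
+ * (ARMMMUIdx_E20_0) and is therefore controlled by SCTLR_EL2.
+ */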
if (el == 0) {
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
- el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
- ? 2 : 1;
+ el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
}
return env->cp15.sctlr_el[el];
}
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E20_0:
- case ARMMMUIdx_SE10_0:
- case ARMMMUIdx_SE20_0:
return 0;
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
return 1;
case ARMMMUIdx_E2:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE2:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
return 2;
- case ARMMMUIdx_SE3:
+ case ARMMMUIdx_E3:
return 3;
default:
g_assert_not_reached();
}
break;
case 3:
- return ARMMMUIdx_SE3;
+ return ARMMMUIdx_E3;
default:
g_assert_not_reached();
}
- if (arm_is_secure_below_el3(env)) {
- idx &= ~ARM_MMU_IDX_A_NS;
- }
-
return idx;
}
switch (mmu_idx) {
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
/* TODO: ARMv8.3-NV */
DP_TBFLAG_A64(flags, UNPRIV, 1);
break;
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
/*
* Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
* gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.