};
/*
- * Fix the number of mmu modes to 16.
+ * Fix the number of mmu modes across all targets.
+ * The current maximum requirement comes from target/arm/.
*/
-#define NB_MMU_MODES 16
+#define NB_MMU_MODES 22
typedef uint32_t MMUIdxMap;
/* Use a fully associative victim tlb of 8 entries. */
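
Note: NB_MMU_MODES and MMUIdxMap must stay in sync, since every TLB-backed
mode needs one bit in the mask type. A compile-time check along these lines
(my own sketch, not part of the patch; QEMU_BUILD_BUG_ON is the existing
macro from qemu/compiler.h) would catch a future mismatch:

    /* Sketch, not from the patch: every mmu mode must map to a mask bit. */
    QEMU_BUILD_BUG_ON(NB_MMU_MODES > sizeof(MMUIdxMap) * 8);
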
*/
return (ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_1_GCS |
ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS |
ARMMMUIdxBit_Stage2 |
ARMMMUIdxBit_Stage2_S);
}
*/
if (changed & (SCR_NS | SCR_NSE)) {
tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS |
ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS |
ARMMMUIdxBit_E10_1 |
- ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_1_GCS |
+ ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2));
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS));
}
}
(arm_hcr_el2_eff(env) & HCR_E2H)) {
uint16_t mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
tlb_flush_by_mmuidx(env_cpu(env), mask);
}
raw_write(env, ri, value);
FIELD(MMUIDXINFO, USER, 8, 1)
FIELD(MMUIDXINFO, STAGE1, 9, 1)
FIELD(MMUIDXINFO, STAGE2, 10, 1)
+FIELD(MMUIDXINFO, GCS, 11, 1)
extern const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8];
return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, STAGE2);
}
+/* Return true if this mmu index implies AccessType_GCS. */
+static inline bool regime_is_gcs(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, GCS);
+}
+
#endif /* TARGET_ARM_MMUIDX_INTERNAL_H */
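
To make the new predicate concrete, a hypothetical self-check (not part of
the patch; plain assert(), using the table entries shown below) would be:

    static void check_gcs_regimes(void)
    {
        /* The GCS variants decode as GCS regimes... */
        assert(regime_is_gcs(ARMMMUIdx_E10_0_GCS));
        assert(regime_is_gcs(ARMMMUIdx_E2_GCS));
        assert(regime_is_gcs(ARMMMUIdx_Stage1_E1_GCS));
        /* ...while the base indexes do not. */
        assert(!regime_is_gcs(ARMMMUIdx_E10_0));
        assert(!regime_is_gcs(ARMMMUIdx_E2));
    }
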
#define USER R_MMUIDXINFO_USER_MASK
#define S1 R_MMUIDXINFO_STAGE1_MASK
#define S2 R_MMUIDXINFO_STAGE2_MASK
+#define GCS R_MMUIDXINFO_GCS_MASK
const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8] = {
/*
* A-profile.
*/
[ARMMMUIdx_E10_0] = EL(0) | REL(1) | R2,
+ [ARMMMUIdx_E10_0_GCS] = EL(0) | REL(1) | R2 | GCS,
[ARMMMUIdx_E10_1] = EL(1) | REL(1) | R2,
[ARMMMUIdx_E10_1_PAN] = EL(1) | REL(1) | R2 | PAN,
+ [ARMMMUIdx_E10_1_GCS] = EL(1) | REL(1) | R2 | GCS,
[ARMMMUIdx_E20_0] = EL(0) | REL(2) | R2,
+ [ARMMMUIdx_E20_0_GCS] = EL(0) | REL(2) | R2 | GCS,
[ARMMMUIdx_E20_2] = EL(2) | REL(2) | R2,
[ARMMMUIdx_E20_2_PAN] = EL(2) | REL(2) | R2 | PAN,
+ [ARMMMUIdx_E20_2_GCS] = EL(2) | REL(2) | R2 | GCS,
[ARMMMUIdx_E2] = EL(2) | REL(2),
+ [ARMMMUIdx_E2_GCS] = EL(2) | REL(2) | GCS,
[ARMMMUIdx_E3] = EL(3) | REL(3),
+ [ARMMMUIdx_E3_GCS] = EL(3) | REL(3) | GCS,
[ARMMMUIdx_E30_0] = EL(0) | REL(3),
[ARMMMUIdx_E30_3_PAN] = EL(3) | REL(3) | PAN,
[ARMMMUIdx_Stage2] = REL(2) | S2,
[ARMMMUIdx_Stage1_E0] = REL(1) | R2 | S1 | USER,
+ [ARMMMUIdx_Stage1_E0_GCS] = REL(1) | R2 | S1 | USER | GCS,
[ARMMMUIdx_Stage1_E1] = REL(1) | R2 | S1,
[ARMMMUIdx_Stage1_E1_PAN] = REL(1) | R2 | S1 | PAN,
+ [ARMMMUIdx_Stage1_E1_GCS] = REL(1) | R2 | S1 | GCS,
/*
* M-profile.
* already heavyweight.
* 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
* because both are in use simultaneously for Secure EL2.
+ * 9. we need separate indexes for handling AccessType_GCS.
*
* This gives us the following list of cases:
*
* EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
+ * EL0 EL1&0 stage 1+2 +GCS
* EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
 * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 PL1&0 stage 1+2 +PAN)
+ * EL1 EL1&0 stage 1+2 +GCS
* EL0 EL2&0
+ * EL0 EL2&0 +GCS
* EL2 EL2&0
* EL2 EL2&0 +PAN
+ * EL2 EL2&0 +GCS
* EL2 (aka NS PL2)
+ * EL2 +GCS
* EL3 (aka AArch32 S PL1 PL1&0)
+ * EL3 +GCS
* AArch32 S PL0 PL1&0 (we call this EL30_0)
* AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
* Stage2 Secure
* Stage2 NonSecure
* plus one TLB per Physical address space: S, NS, Realm, Root
*
- * for a total of 16 different mmu_idx.
+ * for a total of 22 different mmu_idx
+ * (5 EL1&0 + 5 EL2&0 + 2 EL2 + 2 EL3 + 2 AArch32 S + 2 Stage2 + 4 Phys).
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
* as A profile. They only need to distinguish EL0 and EL1 (and
* For M profile we arrange them to have a bit for priv, a bit for negpri
* and a bit for secure.
*/
-#define ARM_MMU_IDX_A 0x10 /* A profile */
-#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
-#define ARM_MMU_IDX_M 0x40 /* M profile */
+#define ARM_MMU_IDX_A 0x20 /* A profile */
+#define ARM_MMU_IDX_NOTLB 0x40 /* does not have a TLB */
+#define ARM_MMU_IDX_M 0x80 /* M profile */
/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV 0x1
#define ARM_MMU_IDX_TYPE_MASK \
(ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
-#define ARM_MMU_IDX_COREIDX_MASK 0xf
+#define ARM_MMU_IDX_COREIDX_MASK 0x1f
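
For context, the helper that produces the softmmu TLB index (unchanged by
this patch; reproduced from target/arm/cpu.h as I recall it) is what relies
on the widened mask:

    static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
    {
        /* Strip the type bits; core indexes now span 0..21 (5 bits). */
        return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
    }
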
typedef enum ARMMMUIdx {
/*
* A-profile.
*/
- ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
- ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
- ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
- ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
- ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
- ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A,
- ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_0_GCS = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1_PAN = 3 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1_GCS = 4 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E20_0 = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_0_GCS = 6 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2 = 7 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2_PAN = 8 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2_GCS = 9 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E2 = 10 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E2_GCS = 11 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E3 = 12 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E3_GCS = 13 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_0 = 14 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_3_PAN = 15 | ARM_MMU_IDX_A,
/*
* Used for second stage of an S12 page table walk, or for descriptor
* are in use simultaneously for SecureEL2: the security state for
* the S2 ptw is selected by the NS bit from the S1 ptw.
*/
- ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A,
- ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2_S = 16 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2 = 17 | ARM_MMU_IDX_A,
/* TLBs with 1-1 mapping to the physical address spaces. */
- ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_S = 18 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_NS = 19 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Root = 20 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Realm = 21 | ARM_MMU_IDX_A,
/*
* These are not allocated TLBs and are used only for AT system
ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E0_GCS = 3 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E1_GCS = 4 | ARM_MMU_IDX_NOTLB,
/*
* M-profile.
typedef enum ARMMMUIdxBit {
TO_CORE_BIT(E10_0),
- TO_CORE_BIT(E20_0),
+ TO_CORE_BIT(E10_0_GCS),
TO_CORE_BIT(E10_1),
TO_CORE_BIT(E10_1_PAN),
- TO_CORE_BIT(E2),
+ TO_CORE_BIT(E10_1_GCS),
+ TO_CORE_BIT(E20_0),
+ TO_CORE_BIT(E20_0_GCS),
TO_CORE_BIT(E20_2),
TO_CORE_BIT(E20_2_PAN),
+ TO_CORE_BIT(E20_2_GCS),
+ TO_CORE_BIT(E2),
+ TO_CORE_BIT(E2_GCS),
TO_CORE_BIT(E3),
+ TO_CORE_BIT(E3_GCS),
TO_CORE_BIT(E30_0),
TO_CORE_BIT(E30_3_PAN),
TO_CORE_BIT(Stage2),
return ARMMMUIdx_Stage1_E1;
case ARMMMUIdx_E10_1_PAN:
return ARMMMUIdx_Stage1_E1_PAN;
+ case ARMMMUIdx_E10_0_GCS:
+ return ARMMMUIdx_Stage1_E0_GCS;
+ case ARMMMUIdx_E10_1_GCS:
+ return ARMMMUIdx_Stage1_E1_GCS;
default:
return mmu_idx;
}
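
Sketched as assertions (illustrative only, using the enum values above), the
mapping preserves the GCS qualifier across the stage-1 split:

    /* Not from the patch: stage-1-only walks swap the combined E10
     * indexes for their NOTLB Stage1 twins. */
    assert(stage_1_mmu_idx(ARMMMUIdx_E10_0_GCS) == ARMMMUIdx_Stage1_E0_GCS);
    assert(stage_1_mmu_idx(ARMMMUIdx_E10_1_GCS) == ARMMMUIdx_Stage1_E1_GCS);
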
return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_0_GCS:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E10_1_GCS:
/* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_TGE) {
break;
case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E0_GCS:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_E1_GCS:
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_DC) {
break;
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_0_GCS:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E20_2_GCS:
case ARMMMUIdx_E2:
+ case ARMMMUIdx_E2_GCS:
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E3_GCS:
case ARMMMUIdx_E30_0:
case ARMMMUIdx_E30_3_PAN:
break;
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_0_GCS:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E10_1_GCS:
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_0_GCS:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E20_2_GCS:
case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E0_GCS:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_E1_GCS:
case ARMMMUIdx_E2:
+ case ARMMMUIdx_E2_GCS:
ss = arm_security_space_below_el3(env);
break;
case ARMMMUIdx_Stage2:
ss = ARMSS_Secure;
break;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E3_GCS:
case ARMMMUIdx_E30_0:
case ARMMMUIdx_E30_3_PAN:
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
CPUState *cs = env_cpu(env);
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
+ tlb_flush_page_by_mmuidx(cs, pageaddr,
+ ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS);
}
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_E2);
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS);
}
static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS);
}
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS);
}
/*
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
} else {
/* This is AArch64 only, so we don't need to touch the EL30_x TLBs */
mask = ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0;
+ ARMMMUIdxBit_E10_1_GCS |
+ ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS;
}
return mask;
}
if (hcr & HCR_E2H) {
mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
} else {
- mask = ARMMMUIdxBit_E2;
+ mask = ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS;
}
return mask;
}
+static int vae3_tlbmask(void)
+{
+ return ARMMMUIdxBit_E3 | ARMMMUIdxBit_E3_GCS;
+}
+
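
The helper exists because a VA targeted by a TLBI ...E3 operation may be
cached under either the normal or the GCS index, so every EL3 invalidation
below must hit both. A hypothetical caller, mirroring the real handlers
that follow:

    /* Hypothetical example, not from the patch: drop one EL3 VA from
     * both the E3 and E3_GCS TLBs. */
    static void example_flush_el3_va(CPUARMState *env, uint64_t pageaddr)
    {
        tlb_flush_page_by_mmuidx(env_cpu(env), pageaddr, vae3_tlbmask());
    }
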
/* Return 56 if TBI is enabled, 64 otherwise. */
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
uint64_t addr)
static int e2_tlbmask(CPUARMState *env)
{
return (ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS |
ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2);
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS);
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
+ tlb_flush_by_mmuidx(cs, vae3_tlbmask());
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, vae3_tlbmask());
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
+ tlb_flush_page_by_mmuidx(cs, pageaddr, vae3_tlbmask());
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_E3, bits);
+ vae3_tlbmask(), bits);
}
static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
* flush-last-level-only.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
+ do_rvae_write(env, value, vae3_tlbmask(), tlb_force_broadcast(env));
}
static void tlbi_aa64_rvae3is_write(CPUARMState *env,
* flush-last-level-only or inner/outer specific flushes.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
+ do_rvae_write(env, value, vae3_tlbmask(), true);
}
static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,