if (likely(!maybe_resized)) {
/* Alignment has not been checked by tlb_fill_align. */
- int a_bits = memop_alignment_bits(memop);
-
- /*
- * This alignment check differs from the one above, in that this is
- * based on the atomicity of the operation. The intended use case is
- * the ARM memory type field of each PTE, where access to pages with
- * Device memory type require alignment.
- */
- if (unlikely(flags & TLB_CHECK_ALIGNED)) {
- int at_bits = memop_atomicity_bits(memop);
- a_bits = MAX(a_bits, at_bits);
- }
+ int a_bits = memop_tlb_alignment_bits(memop, flags & TLB_CHECK_ALIGNED);
if (unlikely(addr & ((1 << a_bits) - 1))) {
cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
}
MO_ALIGN_64 = 6 << MO_ASHIFT,
MO_ALIGN = MO_AMASK,
+ /*
+ * MO_ALIGN_TLB_ONLY:
+ * Apply MO_AMASK only along the TCG slow path if TLB_CHECK_ALIGNED
+ * is set; otherwise unaligned access is permitted.
+ * This is used by target/arm, where unaligned accesses are
+ * permitted for pages marked Normal but aligned accesses are
+ * required for pages marked Device.
+ */
+ MO_ALIGN_TLB_ONLY = 1 << 8,
+
/*
* MO_ATOM_* describes the atomicity requirements of the operation:
* MO_ATOM_IFALIGN: the operation must be single-copy atomic if it
* size of the operation, if aligned. This retains the behaviour
* from before this field was introduced.
*/
- MO_ATOM_SHIFT = 8,
+ MO_ATOM_SHIFT = 9,
MO_ATOM_IFALIGN = 0 << MO_ATOM_SHIFT,
MO_ATOM_IFALIGN_PAIR = 1 << MO_ATOM_SHIFT,
MO_ATOM_WITHIN16 = 2 << MO_ATOM_SHIFT,
}
/**
- * memop_alignment_bits:
+ * memop_tlb_alignment_bits:
 * @memop: MemOp value
+ * @tlb_check: true if TLB_CHECK_ALIGNED is set for the page
 *
- * Extract the alignment size from the memop.
+ * Extract the alignment size for use with TLB_CHECK_ALIGNED:
+ * MO_ALIGN_TLB_ONLY contributes to the result only when @tlb_check is true.
 */
-static inline unsigned memop_alignment_bits(MemOp memop)
+static inline unsigned memop_tlb_alignment_bits(MemOp memop, bool tlb_check)
{
unsigned a = memop & MO_AMASK;
- if (a == MO_UNALN) {
+ if (a == MO_UNALN || (!tlb_check && (memop & MO_ALIGN_TLB_ONLY))) {
/* No alignment required. */
a = 0;
} else if (a == MO_ALIGN) {
return a;
}
-/*
- * memop_atomicity_bits:
+/**
+ * memop_alignment_bits:
* @memop: MemOp value
*
- * Extract the atomicity size from the memop.
+ * Extract the alignment size from the memop.
*/
-static inline unsigned memop_atomicity_bits(MemOp memop)
+static inline unsigned memop_alignment_bits(MemOp memop)
{
- unsigned size = memop & MO_SIZE;
-
- switch (memop & MO_ATOM_MASK) {
- case MO_ATOM_NONE:
- size = MO_8;
- break;
- case MO_ATOM_IFALIGN_PAIR:
- case MO_ATOM_WITHIN16_PAIR:
- size = size ? size - 1 : 0;
- break;
- default:
- break;
- }
- return size;
+ return memop_tlb_alignment_bits(memop, false);
}
#endif
* CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
*/
if (device) {
- unsigned a_bits = memop_atomicity_bits(memop);
+ unsigned a_bits = memop_tlb_alignment_bits(memop, true);
if (address & ((1 << a_bits) - 1)) {
fi->type = ARMFault_Alignment;
goto do_fault;
* In all cases, issue one operation with the correct atomicity.
*/
mop = a->sz + 1;
- if (s->align_mem) {
- mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
- }
+ mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
+ mop |= (s->align_mem ? 0 : MO_ALIGN_TLB_ONLY);
mop = finalize_memop_pair(s, mop);
if (a->sz == 2) {
TCGv_i64 tmp = tcg_temp_new_i64();
* since that reuses the most code below.
*/
mop = a->sz + 1;
- if (s->align_mem) {
- mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
- }
+ mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
+ mop |= (s->align_mem ? 0 : MO_ALIGN_TLB_ONLY);
mop = finalize_memop_pair(s, mop);
if (a->sz == 2) {
int o2 = s->be_data == MO_LE ? 32 : 0;
if (a->align) {
align = pow2_align(a->align + 2); /* 4 ** a->align */
} else {
- align = s->align_mem ? MO_ALIGN : 0;
+ align = MO_ALIGN | (s->align_mem ? 0 : MO_ALIGN_TLB_ONLY);
}
/*
*/
static inline MemOp finalize_memop_atom(DisasContext *s, MemOp opc, MemOp atom)
{
- if (s->align_mem && !(opc & MO_AMASK)) {
- opc |= MO_ALIGN;
+ if (!(opc & MO_AMASK)) {
+ opc |= MO_ALIGN | (s->align_mem ? 0 : MO_ALIGN_TLB_ONLY);
}
return opc | atom | s->be_data;
}
case INDEX_op_qemu_ld2:
case INDEX_op_qemu_st2:
{
- const char *s_al, *s_op, *s_at;
+ const char *s_al, *s_tlb, *s_op, *s_at;
MemOpIdx oi = op->args[k++];
MemOp mop = get_memop(oi);
unsigned ix = get_mmuidx(oi);
+ s_tlb = mop & MO_ALIGN_TLB_ONLY ? "tlb+" : "";
s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
s_at = atom_name[(mop & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
- mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);
+ mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE |
+ MO_ATOM_MASK | MO_ALIGN_TLB_ONLY);
/* If all fields are accounted for, print symbolically. */
if (!mop && s_al && s_op && s_at) {
- col += ne_fprintf(f, ",%s%s%s,%u",
- s_at, s_al, s_op, ix);
+ col += ne_fprintf(f, ",%s%s%s%s,%u",
+ s_at, s_al, s_tlb, s_op, ix);
} else {
mop = get_memop(oi);
col += ne_fprintf(f, ",$0x%x,%u", mop, ix);