tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
/* Handle CPU specific unaligned behaviour */
- a_bits = get_alignment_bits(l->memop);
+ a_bits = memop_alignment_bits(l->memop);
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
}
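
The hunk above is the generic unaligned-access check: a_bits is the log2 of the required alignment taken from the MemOp, and any set low bit of the address triggers cpu_unaligned_access(). A minimal, self-contained illustration of the mask arithmetic (example values are hypothetical, not taken from the patch):

#include <stdbool.h>
#include <stdint.h>

/* Illustration only: an access is misaligned when any of the low a_bits
 * of the address are set.  With a_bits = 3 (8-byte alignment) the mask
 * is 0x7, so addr = 0x1004 would be reported as misaligned.
 */
static bool example_is_misaligned(uint64_t addr, unsigned a_bits)
{
    uint64_t mask = ((uint64_t)1 << a_bits) - 1;
    return (addr & mask) != 0;
}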
{
uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
uintptr_t index;
CPUTLBEntry *tlbe;
vaddr tlb_addr;
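
Here the helper starts from a MemOpIdx (oi), which packs the MemOp together with the MMU index; get_memop() and get_mmuidx() unpack it before the alignment bits are derived. A hedged sketch of that packing, assuming a shift-and-mask encoding (the 4-bit field width is an assumption for illustration, not read from the headers):

/* Illustration only: an oi-style value carrying both pieces. */
typedef unsigned int ExampleOpIdx;

static ExampleOpIdx example_make_memop_idx(unsigned memop, unsigned mmu_idx)
{
    return (memop << 4) | (mmu_idx & 15);
}

static unsigned example_get_memop(ExampleOpIdx oi)  { return oi >> 4; }
static unsigned example_get_mmuidx(ExampleOpIdx oi) { return oi & 15; }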
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
MemOp mop, uintptr_t ra, MMUAccessType type)
{
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
- int a_bits = get_alignment_bits(mop);
+ int a_bits = memop_alignment_bits(mop);
void *ret;
/* Enforce guest required alignment. */
}
/**
- * get_alignment_bits
+ * memop_alignment_bits:
* @memop: MemOp value
*
* Extract the alignment size from the memop.
*/
-static inline unsigned get_alignment_bits(MemOp memop)
+static inline unsigned memop_alignment_bits(MemOp memop)
{
unsigned a = memop & MO_AMASK;
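
The hunk only shows the first line of the body. For orientation, a self-contained sketch of the decode such a helper performs, assuming the MO_UNALN / MO_ALIGN / MO_ASHIFT encoding from "exec/memop.h"; this is illustrative, not the patched source:

/* Illustration only: mirrors what memop_alignment_bits() is expected to do. */
static unsigned example_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        return 0;                   /* no alignment requirement */
    } else if (a == MO_ALIGN) {
        return memop & MO_SIZE;     /* natural alignment for the access size */
    } else {
        return a >> MO_ASHIFT;      /* explicit alignment, e.g. MO_ALIGN_8 */
    }
}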
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
+ desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(memop));
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);
ret = tcg_temp_new_i64();
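
FIELD_DP32() deposits a value into a named bit field of the 32-bit MTE descriptor. A hedged illustration of the deposit semantics, using a hypothetical field FOO at bits [7:4] rather than the real MTEDESC layout:

#include <stdint.h>

/* Illustration only: with a hypothetical field FOO spanning bits [7:4],
 *     desc = FIELD_DP32(desc, MYREG, FOO, 5);
 * behaves like the explicit deposit below.
 */
static uint32_t example_deposit_foo(uint32_t desc, uint32_t val)
{
    const uint32_t shift = 4, mask = 0xf << shift;
    return (desc & ~mask) | ((val << shift) & mask);
}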
desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
- desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
+ desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(single_mop));
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
ret = tcg_temp_new_i64();
mop |= MO_ALIGN;
}
if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
- tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop));
+ tcg_gen_andi_i32(addr, addr, ~0 << memop_alignment_bits(mop));
}
return mop;
}
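
On Xtensa, when the unaligned-exception option is absent, the translator silently forces the address down to the required alignment instead of raising a fault; ~0 << a_bits clears the low bits. A small self-contained illustration with assumed example values:

#include <stdint.h>

/* Illustration only: with a_bits = 2 (4-byte alignment), ~0u << 2 is
 * 0xfffffffc, so address 0x1003 is silently rounded down to 0x1000.
 */
static uint32_t example_force_align(uint32_t addr, unsigned a_bits)
{
    return addr & (~0u << a_bits);
}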
tcg_debug_assert((datalo & 1) == 0);
tcg_debug_assert(datahi == datalo + 1);
/* LDRD requires alignment; double-check that. */
- if (get_alignment_bits(opc) >= MO_64) {
+ if (memop_alignment_bits(opc) >= MO_64) {
if (h.index < 0) {
tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
break;
tcg_debug_assert((datalo & 1) == 0);
tcg_debug_assert(datahi == datalo + 1);
/* STRD requires alignment; double-check that. */
- if (get_alignment_bits(opc) >= MO_64) {
+ if (memop_alignment_bits(opc) >= MO_64) {
if (h.index < 0) {
tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
} else {
* Otherwise, test for at least natural alignment and defer
* everything else to the helper functions.
*/
- if (s_bits != get_alignment_bits(opc)) {
+ if (s_bits != memop_alignment_bits(opc)) {
tcg_debug_assert(check_fit_tl(a_mask, 13));
tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);
static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
- unsigned a_bits = get_alignment_bits(op);
+ unsigned a_bits = memop_alignment_bits(op);
check_max_alignment(a_bits);
TCGv_i64 ext_addr = NULL;
TCGOpcode opc;
- check_max_alignment(get_alignment_bits(memop));
+ check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
/* In serial mode, reduce atomicity. */
TCGv_i64 ext_addr = NULL;
TCGOpcode opc;
- check_max_alignment(get_alignment_bits(memop));
+ check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
/* In serial mode, reduce atomicity. */
static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
MemOp host_atom, bool allow_two_ops)
{
- MemOp align = get_alignment_bits(opc);
+ MemOp align = memop_alignment_bits(opc);
MemOp size = opc & MO_SIZE;
MemOp half = size ? size - 1 : 0;
MemOp atom = opc & MO_ATOM_MASK;