TCGv src1;
mop |= MO_ALIGN;
- mop |= MO_TE;
+ mop |= mo_endian(ctx);
decode_save_opc(ctx, 0);
src1 = get_address(ctx, a->rs1, 0);
TCGLabel *l2 = gen_new_label();
mop |= MO_ALIGN;
- mop |= MO_TE;
+ mop |= mo_endian(ctx);
decode_save_opc(ctx, 0);
src1 = get_address(ctx, a->rs1, 0);
} else {
memop |= MO_ATOM_IFALIGN;
}
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
} else {
memop |= MO_ATOM_IFALIGN;
}
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
decode_save_opc(ctx, 0);
addr = get_address(ctx, a->rs1, a->imm);
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
REQUIRE_FPU;
REQUIRE_EXT(ctx, RVF);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
{
bool out;
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
{
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
if (ctx->cfg_ptr->ext_zama16b) {
memop |= MO_ATOM_WITHIN16;
}
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2);
- mop |= MO_TE;
+ mop |= mo_endian(ctx);
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop);
TCGv_i64 desth = get_gpr(ctx, a->rd == 0 ? 0 : a->rd + 1, EXT_NONE);
MemOp memop = MO_ALIGN | MO_UO;
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_concat_i64_i128(src2, src2l, src2h);
tcg_gen_concat_i64_i128(dest, destl, desth);
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
tcg_gen_addi_tl(addr, sp, stack_adj - reg_size);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
for (i = X_Sn + 11; i >= 0; i--) {
if (reg_bitmap & (1 << i)) {
TCGv dest = dest_gpr(ctx, i);
tcg_gen_subi_tl(addr, sp, reg_size);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
for (i = X_Sn + 11; i >= 0; i--) {
if (reg_bitmap & (1 << i)) {
TCGv val = get_gpr(ctx, i, EXT_NONE);
REQUIRE_FPU;
REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
decode_save_opc(ctx, 0);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
REQUIRE_FPU;
REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
decode_save_opc(ctx, 0);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
if (a->imm) {
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
src1 = get_address(ctx, a->rs1, 0);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx), memop);
gen_set_gpr(ctx, a->rd, dest);
return true;
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
src1 = get_address(ctx, a->rs1, 0);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx), memop);
gen_set_gpr(ctx, a->rd, dest);
return true;
TCGv_i64 rd = cpu_fpr[a->rd];
TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_qemu_ld_i64(rd, addr, ctx->mem_idx, memop);
if ((memop & MO_SIZE) == MO_32) {
gen_nanbox_s(rd, rd);
TCGv_i64 rd = cpu_fpr[a->rd];
TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_qemu_st_i64(rd, addr, ctx->mem_idx, memop);
return true;
TCGv rd = dest_gpr(ctx, a->rd);
TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_qemu_ld_tl(rd, addr, ctx->mem_idx, memop);
tcg_gen_addi_tl(rs1, rs1, imm);
gen_set_gpr(ctx, a->rd, rd);
TCGv data = get_gpr(ctx, a->rd, EXT_NONE);
TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
tcg_gen_addi_tl(rs1, rs1, imm);
gen_set_gpr(ctx, a->rs1, rs1);
TCGv rd = dest_gpr(ctx, a->rd);
TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_qemu_ld_tl(rd, addr, ctx->mem_idx, memop);
gen_set_gpr(ctx, a->rd, rd);
TCGv data = get_gpr(ctx, a->rd, EXT_NONE);
TCGv addr = get_th_address_indexed(ctx, a->rs1, a->rs2, a->imm2, zext_offs);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
return true;
addr1 = get_address(ctx, a->rs, imm);
addr2 = get_address(ctx, a->rs, memop_size(memop) + imm);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_qemu_ld_tl(t1, addr1, ctx->mem_idx, memop);
tcg_gen_qemu_ld_tl(t2, addr2, ctx->mem_idx, memop);
gen_set_gpr(ctx, a->rd1, t1);
addr1 = get_address(ctx, a->rs, imm);
addr2 = get_address(ctx, a->rs, memop_size(memop) + imm);
- memop |= MO_TE;
+ memop |= mo_endian(ctx);
tcg_gen_qemu_st_tl(data1, addr1, ctx->mem_idx, memop);
tcg_gen_qemu_st_tl(data2, addr2, ctx->mem_idx, memop);
return true;
return ctx->misa_ext & ext;
}
+static inline MemOp mo_endian(DisasContext *ctx)
+{
+ /*
+ * Return the MemOp endianness flag to use for guest memory accesses.
+ *
+ * The RISC-V privileged architecture allows per-mode endianness
+ * control via MSTATUS bits:
+ * - MSTATUS_UBE (User-mode),
+ * - MSTATUS_SBE (Supervisor-mode),
+ * - MSTATUS_MBE (Machine-mode)
+ * but we don't implement that yet, so this always returns the
+ * target-default endianness (MO_TE). Centralising the choice here
+ * lets the callers pick up dynamic endianness once it is implemented.
+ */
+ return MO_TE;
+}
+
#ifdef TARGET_RISCV32
#define get_xl(ctx) MXL_RV32
#elif defined(CONFIG_USER_ONLY)
#define get_address_xl(ctx) ((ctx)->address_xl)
#endif
-#define mxl_memop(ctx) ((get_xl(ctx) + 1) | MO_TE)
+/* MemOp for an XLEN-sized access: size from get_xl(ctx), plus endianness. */
+#define mxl_memop(ctx) ((get_xl(ctx) + 1) | mo_endian(ctx))
/* The word size for this machine mode. */
static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
MemOp size = mop & MO_SIZE;
- mop |= MO_TE;
+ mop |= mo_endian(ctx);
if (ctx->cfg_ptr->ext_zama16b && size >= MO_32) {
mop |= MO_ATOM_WITHIN16;
} else {
TCGv src1 = get_address(ctx, a->rs1, 0);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
- mop |= MO_TE;
+ mop |= mo_endian(ctx);
decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO);
tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop);