tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
}
-static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
static const uint32_t sync[] = {
[0 ... TCG_MO_ALL] = DMB_ISH | DMB_LD | DMB_ST,
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
break;
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
-
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
}
}
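For reference, the operand that indexes the aarch64 sync[] table above (and that every tcg_out_mb below receives) is a mask built from the ordering bits in tcg/tcg-mo.h. A minimal standalone sketch of how a backend reduces it to a table index follows; the constant values are the ones defined in tcg-mo.h, while mb_table_index() is a made-up helper name used only for illustration.

/* Ordering bits as defined in tcg/tcg-mo.h (the TCG_BAR_* kind bits sit above them). */
enum {
    TCG_MO_LD_LD = 0x01,   /* earlier loads ordered before later loads   */
    TCG_MO_ST_LD = 0x02,   /* earlier stores ordered before later loads  */
    TCG_MO_LD_ST = 0x04,   /* earlier loads ordered before later stores  */
    TCG_MO_ST_ST = 0x08,   /* earlier stores ordered before later stores */
    TCG_MO_ALL   = 0x0F,
};

/* Hypothetical helper: only the TCG_MO_* bits select an entry; the
 * [0 ... TCG_MO_ALL] designated range in the table above makes the full
 * barrier the default for any combination without a weaker entry. */
static unsigned mb_table_index(unsigned a0)
{
    return a0 & TCG_MO_ALL;
}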
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
if (use_armv7_instructions) {
tcg_out32(s, INSN_DMB_ISH);
tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
break;
- case INDEX_op_mb:
- tcg_out_mb(s, args[0]);
- break;
-
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
}
}
-static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
/* Given the strength of x86 memory ordering, we only need care for
store-load ordering. Experimentally, "lock orl $0,0(%esp)" is
break;
#endif
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
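The truncated comment above carries the whole argument for x86: under TSO the only reordering another CPU can observe is a store followed by a later load, so any barrier request that lacks TCG_MO_ST_LD compiles to nothing at all. A hedged sketch of that test (an illustrative helper, not the backend code):

/* Illustrative only: x86 TSO already provides LD_LD, LD_ST and ST_ST
 * ordering, so an instruction is emitted only for store->load. */
static bool x86_mb_needs_insn(unsigned a0)
{
    return (a0 & TCG_MO_ST_LD) != 0;
}

When the bit is set, the backend emits the locked or-to-stack the comment mentions rather than an mfence.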
* TCG intrinsics
*/
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
/* Baseline LoongArch only has the full barrier, unfortunately. */
tcg_out_opc_dbar(s, 0);
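Because baseline LoongArch has no finer-grained barrier, every combination of ordering bits collapses onto the same instruction and the operand is effectively ignored. Spelled out as an illustrative sketch (not the backend code; the function name is invented):

/* Illustrative: with only the full barrier available there is nothing
 * to select, so the requested ordering bits go unused. */
static void full_barrier_only(TCGContext *s, unsigned a0)
{
    (void)a0;               /* any TCG_MO_* combination needs the same thing */
    tcg_out_opc_dbar(s, 0); /* hint 0 = full barrier */
}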
TCGArg a3 = args[3];
switch (opc) {
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
break;
}
}
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
static const MIPSInsn sync[] = {
/* Note that SYNC_MB is slightly weaker than SYNC 0,
}
break;
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
.out = tgen_brcond2,
};
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
uint32_t insn;
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;
- case INDEX_op_mb:
- tcg_out_mb(s, args[0]);
- break;
-
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
tcg_out_call_int(s, arg, false);
}
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
tcg_insn_unit insn = OPC_FENCE;
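The hunk stops right after the OPC_FENCE base opcode. The RISC-V FENCE instruction encodes separate predecessor and successor access sets (device I/O and memory R/W flags in bits 27..24 and 23..20 of the insn), so the four TCG_MO_* bits map naturally onto read/write pairs. A hedged sketch of that mapping, written against the bit positions in the ISA manual rather than the backend's own constants:

/* Illustrative sketch: OR in the FENCE pred/succ bits for memory accesses.
 * PR = bit 25, PW = bit 24, SR = bit 21, SW = bit 20. */
static uint32_t fence_ordering_bits(unsigned a0)
{
    uint32_t insn = 0;
    if (a0 & TCG_MO_LD_LD) insn |= (1u << 25) | (1u << 21); /* PR, SR */
    if (a0 & TCG_MO_ST_LD) insn |= (1u << 24) | (1u << 21); /* PW, SR */
    if (a0 & TCG_MO_LD_ST) insn |= (1u << 25) | (1u << 20); /* PR, SW */
    if (a0 & TCG_MO_ST_ST) insn |= (1u << 24) | (1u << 20); /* PW, SW */
    return insn;
}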
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break;
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
-
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
.out_rr = tgen_not,
};
+static void tcg_out_mb(TCGContext *s, unsigned a0)
+{
+ /*
+     * The host memory model is quite strong; we simply need to
+ * serialize the instruction stream.
+ */
+ if (a0 & TCG_MO_ST_LD) {
+ /* fast-bcr-serialization facility (45) is present */
+ tcg_out_insn(s, RR, BCR, 14, 0);
+ }
+}
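As on x86, the z/Architecture memory model already orders everything except a store followed by a later load, which is why only TCG_MO_ST_LD produces an instruction here. BCR 14,0 is the cheap serialization form added by the fast-BCR-serialization facility (facility 45); BCR 15,0 is the traditional serializing branch it replaces. A hypothetical sketch of that choice, with a plain boolean standing in for whatever facility check a backend might use:

/* Hypothetical illustration only: the function above assumes facility 45
 * is present and always emits "bcr 14,0"; without it, "bcr 15,0" would
 * be needed instead. */
static unsigned serializing_bcr_mask(bool have_fast_bcr_serialization)
{
    return have_fast_bcr_serialization ? 14 : 15;
}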
# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
break;
- case INDEX_op_mb:
- /* The host memory model is quite strong, we simply need to
- serialize the instruction stream. */
- if (args[0] & TCG_MO_ST_LD) {
- /* fast-bcr-serialization facility (45) is present */
- tcg_out_insn(s, RR, BCR, 14, 0);
- }
- break;
-
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
tcg_out_nop(s);
}
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
/* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
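The "mirror" mentioned in the comment is exact: the SPARC V9 MEMBAR mmask assigns #LoadLoad=1, #StoreLoad=2, #LoadStore=4 and #StoreStore=8, which are precisely the values tcg/tcg-mo.h gives TCG_MO_LD_LD, TCG_MO_ST_LD, TCG_MO_LD_ST and TCG_MO_ST_ST, so the ordering bits can be OR'd straight into the instruction. Spelled out as purely illustrative compile-time checks:

/* Illustrative only: the OR above is valid because the encodings agree. */
QEMU_BUILD_BUG_ON(TCG_MO_LD_LD != 1);  /* MEMBAR #LoadLoad   */
QEMU_BUILD_BUG_ON(TCG_MO_ST_LD != 2);  /* MEMBAR #StoreLoad  */
QEMU_BUILD_BUG_ON(TCG_MO_LD_ST != 4);  /* MEMBAR #LoadStore  */
QEMU_BUILD_BUG_ON(TCG_MO_ST_ST != 8);  /* MEMBAR #StoreStore */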
tcg_out_ldst(s, a0, a1, a2, STX);
break;
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
-
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
+static void tcg_out_mb(TCGContext *s, unsigned bar);
static void tcg_out_set_carry(TCGContext *s);
static void tcg_out_set_borrow(TCGContext *s);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
case INDEX_op_goto_tb:
tcg_out_goto_tb(s, op->args[0]);
break;
+ case INDEX_op_mb:
+ tcg_out_mb(s, op->args[0]);
+ break;
case INDEX_op_dup2_vec:
if (tcg_reg_alloc_dup2(s, op)) {
break;
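With INDEX_op_mb dispatched here, the per-backend tcg_out_op switches no longer need their own copies and only have to provide the tcg_out_mb hook declared above. For context, the operand being passed through is the TCGBar mask a frontend hands to tcg_gen_mb(); a guest full barrier is typically generated as:

/* Frontend side, shown for context only (not part of this patch). */
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);   /* all four orderings, sequentially consistent */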
.out = tgen_setcond2,
};
+static void tcg_out_mb(TCGContext *s, unsigned a0)
+{
+ tcg_out_op_v(s, INDEX_op_mb);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
}
break;
- case INDEX_op_mb:
- tcg_out_op_v(s, opc);
- break;
-
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
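A closing note on TCI: the stub added above only records the opcode in the bytecode stream (tcg_out_op_v emits an operand-less op), so the ordering itself must be supplied when the interpreter executes it. From memory, and not part of this diff, the consumer in tcg/tci.c amounts to a host-level full barrier along these lines:

/* Interpreter side (sketch): no operand is encoded, so every mb is
 * treated as a full fence on the host. */
case INDEX_op_mb:
    smp_mb();
    break;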