     - | *t0* = *t1*
       | Move *t1* to *t0*.
-   * - bswap16_i32/i64 *t0*, *t1*, *flags*
+   * - bswap16 *t0*, *t1*, *flags*
     - | 16 bit byte swap on the low bits of a 32/64 bit input.
       |
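
The flags argument controls how the result is extended above bit 15: TCG_BSWAP_OZ zero-extends, TCG_BSWAP_OS sign-extends, and with neither flag the upper bits are unspecified; TCG_BSWAP_IZ only asserts that the input is already zero-extended from bit 15. A rough host-side model of those semantics, purely illustrative and not part of the patch:

    #include <stdint.h>

    /* Model of "bswap16 t0, t1, flags": swap the low two bytes of t1,
     * then extend the 16-bit result according to the output flags. */
    static uint64_t model_bswap16(uint64_t t1, int oz, int os)
    {
        uint64_t swapped = (uint16_t)(((t1 & 0xff) << 8) | ((t1 >> 8) & 0xff));

        if (os) {
            return (uint64_t)(int64_t)(int16_t)swapped; /* TCG_BSWAP_OS */
        }
        if (oz) {
            return swapped;                             /* TCG_BSWAP_OZ */
        }
        /* Neither flag: the real op leaves bits above 15 unspecified;
         * this model simply returns the zero-extended value. */
        return swapped;
    }
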
DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
+DEF(bswap16, 1, 1, 1, TCG_OPF_INT)
DEF(clz, 1, 2, 0, TCG_OPF_INT)
DEF(ctpop, 1, 1, 0, TCG_OPF_INT)
DEF(ctz, 1, 2, 0, TCG_OPF_INT)
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(setcond2_i32, 1, 4, 1, 0)
-DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)
/* load/store */
DEF(extrl_i64_i32, 1, 1, 0, 0)
DEF(extrh_i64_i32, 1, 1, 0, 0)
-DEF(bswap16_i64, 1, 1, 1, 0)
DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)
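
For reference, entries in tcg-opc.h follow DEF(name, out-args, in-args, const-args, flags), so the single replacement definition reads:

    DEF(bswap16, 1, 1, 1, TCG_OPF_INT)
    /* one output, one input, one constant argument (the bswap flags);
     * TCG_OPF_INT makes the opcode available for both TCG_TYPE_I32 and
     * TCG_TYPE_I64, replacing the two per-type definitions removed here. */
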
case INDEX_op_ctpop:
return type == TCG_TYPE_I32 ? ctpop32(x) : ctpop64(x);
- CASE_OP_32_64(bswap16):
+ case INDEX_op_bswap16:
x = bswap16(x);
return y & TCG_BSWAP_OS ? (int16_t)x : x;
z_mask = t1->z_mask;
switch (op->opc) {
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap16:
z_mask = bswap16(z_mask);
sign = INT16_MIN;
break;
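
Here z_mask is the set of output bits that may be non-zero: byte-swapping the input's mask gives the output's mask, and sign = INT16_MIN marks the sign-bit position so that, when TCG_BSWAP_OS is requested and that bit may be set, the bits above it are also treated as possibly non-zero. A small illustrative example, not taken from the patch:

    #include <stdint.h>

    /* If only the low byte of the input may be non-zero (z_mask 0x00ff),
     * then after bswap16 only bits 8..15 may be non-zero (z_mask 0xff00). */
    static uint64_t swap16_mask(uint64_t z_mask)
    {
        return (uint16_t)(((z_mask & 0xff) << 8) | ((z_mask >> 8) & 0xff));
    }
    /* swap16_mask(0x00ff) == 0xff00 */
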
case INDEX_op_brcond2_i32:
done = fold_brcond2(&ctx, op);
break;
- CASE_OP_32_64(bswap16):
+ case INDEX_op_bswap16:
CASE_OP_32_64(bswap32):
case INDEX_op_bswap64_i64:
done = fold_bswap(&ctx, op);
/* Only one extension flag may be present. */
tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ));
- if (tcg_op_supported(INDEX_op_bswap16_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags);
+ if (tcg_op_supported(INDEX_op_bswap16, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3i_i32(INDEX_op_bswap16, ret, arg, flags);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
} else {
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}
- } else if (tcg_op_supported(INDEX_op_bswap16_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags);
+ } else if (tcg_op_supported(INDEX_op_bswap16, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3i_i64(INDEX_op_bswap16, ret, arg, flags);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
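
The front-end entry points are unchanged by the merge: tcg_gen_bswap16_i32/_i64 keep their (ret, arg, flags) signature and now emit the single type-generic opcode when the backend advertises it, or the inline expansion built from the temporaries above otherwise. A hypothetical call site, with dst/src standing in for whatever TCGv_i32 values a front end has on hand:

    /* Byte-swap a value already known to be zero-extended from bit 15,
     * asking for a zero-extended result. */
    tcg_gen_bswap16_i32(dst, src, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
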
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
OUTOP(INDEX_op_brcond, TCGOutOpBrcond, outop_brcond),
- OUTOP(INDEX_op_bswap16_i32, TCGOutOpBswap, outop_bswap16),
- OUTOP(INDEX_op_bswap16_i64, TCGOutOpBswap, outop_bswap16),
+ OUTOP(INDEX_op_bswap16, TCGOutOpBswap, outop_bswap16),
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
i = 1;
}
break;
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap16:
case INDEX_op_bswap32_i32:
case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
}
break;
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap16:
{
const TCGOutOpBswap *out =
container_of(all_outop[op->opc], TCGOutOpBswap, base);
tci_write_reg64(regs, r1, r0, T1 - T2);
break;
#endif
- CASE_32_64(bswap16)
+ case INDEX_op_bswap16:
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap16(regs[r1]);
break;
op_name, str_r(r0), str_r(r1), s2);
break;
+ case INDEX_op_bswap16:
case INDEX_op_ctpop:
case INDEX_op_mov:
case INDEX_op_neg:
case INDEX_op_not:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap16_i32:
- case INDEX_op_bswap16_i64:
case INDEX_op_bswap32_i32:
case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
static void tgen_bswap16(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, unsigned flags)
{
- tcg_out_op_rr(s, INDEX_op_bswap16_i32, a0, a1);
+ tcg_out_op_rr(s, INDEX_op_bswap16, a0, a1);
if (flags & TCG_BSWAP_OS) {
tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16);
}
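
In the TCI backend the two halves fit together as follows: the interpreter case shown earlier stores bswap16(regs[r1]) into a 64-bit register, so the result is already zero-extended and TCG_BSWAP_OZ costs nothing, while tgen_bswap16 above only has to append a 16-bit sign-extract when TCG_BSWAP_OS is requested. A stand-alone model of that split, for illustration only:

    #include <stdint.h>

    static uint64_t tci_like_bswap16(uint64_t reg, int want_os)
    {
        /* The interpreted bswap16 op: swap the low two bytes; storing the
         * uint16_t result into a 64-bit register zero-extends it. */
        uint64_t r = (uint16_t)(((reg & 0xff) << 8) | ((reg >> 8) & 0xff));

        if (want_os) {
            /* The follow-up sextract emitted for TCG_BSWAP_OS. */
            r = (uint64_t)(int64_t)(int16_t)r;
        }
        return r;
    }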