|
| *dest* = (*t1* & ~0x0f00) | ((*t2* << 8) & 0x0f00)
- * - extract_i32/i64 *dest*, *t1*, *pos*, *len*
+ * - extract *dest*, *t1*, *pos*, *len*
sextract_i32/i64 *dest*, *t1*, *pos*, *len*
For extract_*, the result will be extended to the left with zeros; for sextract_*, the result will be extended
to the left with copies of the bitfield sign bit at *pos* + *len* - 1.
|
| For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field
at bit 8. This operation would be equivalent to
|
| *dest* = (*t1* << 20) >> 28
|
| (using an arithmetic right shift).
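
As a reference for the two behaviors, here is a minimal C sketch in the
style of QEMU's extract32()/sextract32() helpers from bitops.h (function
names are illustrative; 0 < len and pos + len <= 32 are assumed):

    #include <stdint.h>

    /* extract: bits [pos, pos+len) of v, zero-filled to the left. */
    static uint32_t extract_field(uint32_t v, unsigned pos, unsigned len)
    {
        return (v >> pos) & (~0u >> (32 - len));
    }

    /* sextract: shift the field against the top of the word, then use an
     * arithmetic right shift to replicate the sign bit at pos+len-1. */
    static int32_t sextract_field(uint32_t v, unsigned pos, unsigned len)
    {
        return (int32_t)(v << (32 - len - pos)) >> (32 - len);
    }

With pos = 8 and len = 4 the signed variant reduces to (v << 20) >> 28,
matching the example above.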
* - extract2_i32/i64 *dest*, *t1*, *t2*, *pos*
DEF(divu, 1, 2, 0, TCG_OPF_INT)
DEF(divu2, 2, 3, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
+DEF(extract, 1, 1, 2, TCG_OPF_INT)
DEF(movcond, 1, 4, 1, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
DEF(muls2, 2, 2, 0, TCG_OPF_INT)
DEF(st_i32, 0, 2, 1, 0)
/* shifts/rotates */
DEF(deposit_i32, 1, 2, 2, 0)
-DEF(extract_i32, 1, 1, 2, 0)
DEF(sextract_i32, 1, 1, 2, 0)
DEF(extract2_i32, 1, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
/* shifts/rotates */
DEF(deposit_i64, 1, 2, 2, 0)
-DEF(extract_i64, 1, 1, 2, 0)
DEF(sextract_i64, 1, 1, 2, 0)
DEF(extract2_i64, 1, 2, 1, 0)
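
For orientation, rows in tcg-opc.h follow DEF(name, oargs, iargs, cargs,
flags), so the new generic entry reads:

    /* DEF(name, out-args, in-args, const-args, flags) */
    DEF(extract, 1, 1, 2, TCG_OPF_INT)
        /* 1 output (dest), 1 input (t1), 2 constant args (pos, len).
         * TCG_OPF_INT marks the opcode as usable at any integer type,
         * which is what lets the per-width _i32/_i64 rows be dropped. */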
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
- TCGOpcode uext_opc = 0, sext_opc = 0;
+ TCGOpcode sext_opc = 0;
TCGCond cond = op->args[3];
TCGArg ret, src1, src2;
TCGOp *op2;
switch (ctx->type) {
case TCG_TYPE_I32:
- if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
- uext_opc = INDEX_op_extract_i32;
- }
if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
sext_opc = INDEX_op_sextract_i32;
}
break;
case TCG_TYPE_I64:
- if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
- uext_opc = INDEX_op_extract_i64;
- }
if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
sext_opc = INDEX_op_sextract_i64;
}
op->args[2] = sh;
op->args[3] = 1;
return;
- } else if (sh && uext_opc) {
- op->opc = uext_opc;
+ } else if (sh && TCG_TARGET_extract_valid(ctx->type, sh, 1)) {
+ op->opc = INDEX_op_extract;
op->args[1] = src1;
op->args[2] = sh;
op->args[3] = 1;
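
For context, fold_setcond_tst_pow2() rewrites a TSTNE/TSTEQ comparison
against a power-of-two mask into a one-bit field extract (the sign-
extracting form serves negsetcond, which wants 0/-1). A small C model of
the unsigned equivalence being relied on (names are illustrative; sh is
the position of the tested bit):

    #include <stdint.h>
    #include <assert.h>

    static void check_fold(uint32_t x, unsigned sh)
    {
        uint32_t tstne  = (x & (UINT32_C(1) << sh)) != 0;  /* setcond TSTNE */
        uint32_t extbit = (x >> sh) & 1;                   /* extract x, sh, 1 */
        assert(tstne == extbit);
    }

The sh != 0 guard reflects that extracting bit 0 is just an AND with 1,
which the surrounding code can emit directly.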
case INDEX_op_eqv_vec:
done = fold_eqv(&ctx, op);
break;
- CASE_OP_32_64(extract):
+ case INDEX_op_extract:
done = fold_extract(&ctx, op);
break;
CASE_OP_32_64(extract2):
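
The CASE_OP_32_64() label being dropped expands to one case per width;
the merged opcode needs only a plain label. As defined in tcg/optimize.c:

    #define CASE_OP_32_64(x)                        \
            glue(glue(case INDEX_op_, x), _i32):    \
            glue(glue(case INDEX_op_, x), _i64)

so CASE_OP_32_64(extract) previously expanded to
"case INDEX_op_extract_i32: case INDEX_op_extract_i64".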
}
if (TCG_TARGET_extract_valid(TCG_TYPE_I32, ofs, len)) {
- tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
+ tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, ofs, len);
return;
}
if (ofs == 0) {
    tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
    return;
}
/* Assume that zero-extension, if available, is cheaper than a shift. */
if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, ofs + len)) {
- tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, 0, ofs + len);
+ tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, 0, ofs + len);
tcg_gen_shri_i32(ret, ret, ofs);
return;
}
}
if (TCG_TARGET_extract_valid(TCG_TYPE_I64, ofs, len)) {
- tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
+ tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, ofs, len);
return;
}
if (ofs == 0) {
    tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
    return;
}
/* Assume that zero-extension, if available, is cheaper than a shift. */
if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, ofs + len)) {
- tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, 0, ofs + len);
+ tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, 0, ofs + len);
tcg_gen_shri_i64(ret, ret, ofs);
return;
}
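
Both fallbacks rely on the same identity: an extract at nonzero ofs equals
zero-extending the low ofs + len bits and shifting right by ofs. A C sketch
of the 32-bit case (illustrative; assumes 0 < len and ofs + len < 32 so
every shift is defined):

    #include <stdint.h>
    #include <assert.h>

    static void check_fallback(uint32_t arg, unsigned ofs, unsigned len)
    {
        /* direct form: extract(arg, ofs, len) */
        uint32_t direct = (arg >> ofs) & ((UINT32_C(1) << len) - 1);
        /* fallback: zero-extend to ofs + len bits, then shift right */
        uint32_t zext = arg & ((UINT32_C(1) << (ofs + len)) - 1);
        assert((zext >> ofs) == direct);
    }

The results are identical, so the comment's "assume zero-extension is
cheaper" is purely a cost heuristic for picking between the two encodings.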
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
- OUTOP(INDEX_op_extract_i32, TCGOutOpExtract, outop_extract),
- OUTOP(INDEX_op_extract_i64, TCGOutOpExtract, outop_extract),
+ OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_brcond:
+ case INDEX_op_extract:
case INDEX_op_mov:
case INDEX_op_movcond:
case INDEX_op_negsetcond:
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
- case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
case INDEX_op_deposit_i32:
return true;
case INDEX_op_st_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i64:
case INDEX_op_deposit_i64:
return TCG_TARGET_REG_BITS == 64;
}
break;
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
+ case INDEX_op_extract:
{
const TCGOutOpExtract *out =
container_of(all_outop[op->opc], TCGOutOpExtract, base);
#define ctpop_tr glue(ctpop, TCG_TARGET_REG_BITS)
+#define extract_tr  glue(extract, TCG_TARGET_REG_BITS)
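
glue() token-pastes its arguments (via the xglue()/glue() pair in QEMU's
compiler.h), so extract_tr resolves at compile time to the helper matching
the host register width:

    /* TCG_TARGET_REG_BITS == 64:  extract_tr(v, p, l) -> extract64(v, p, l)
     * TCG_TARGET_REG_BITS == 32:  extract_tr(v, p, l) -> extract32(v, p, l)
     */

On a 64-bit host this stays correct for 32-bit-typed ops: they keep
pos + len <= 32, so extract64() only ever reads the low half of the
register and agrees with extract32().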
/*
* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
break;
- case INDEX_op_extract_i32:
+ case INDEX_op_extract:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = extract32(regs[r1], pos, len);
+ regs[r0] = extract_tr(regs[r1], pos, len);
break;
case INDEX_op_sextract_i32:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
break;
- case INDEX_op_extract_i64:
- tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = extract64(regs[r1], pos, len);
- break;
case INDEX_op_sextract_i64:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
regs[r0] = sextract64(regs[r1], pos, len);
op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
break;
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
+ case INDEX_op_extract:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
TCGReg rs, unsigned pos, unsigned len)
{
- TCGOpcode opc = type == TCG_TYPE_I32 ?
- INDEX_op_extract_i32 :
- INDEX_op_extract_i64;
- tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
+ tcg_out_op_rrbb(s, INDEX_op_extract, rd, rs, pos, len);
}
static const TCGOutOpExtract outop_extract = {