|
| (using an arithmetic right shift) on TCG_TYPE_I32.
- * - extract2_i32/i64 *dest*, *t1*, *t2*, *pos*
+ * - extract2 *dest*, *t1*, *t2*, *pos*
- - | For N = {32,64}, extract an N-bit quantity from the concatenation
+ - | For TCG_TYPE_I{N}, extract an N-bit quantity from the concatenation
of *t2*:*t1*, beginning at *pos*. The tcg_gen_extract2_{i32,i64} expander
accepts 0 <= *pos* <= N as inputs. The backend code generator will
not see either 0 or N as inputs for these opcodes.
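
A minimal C sketch of the documented operation (an illustration, not part of the patch; the function name is mine, and it assumes TCG_TYPE_I64 with 0 < *pos* < 64, the only range the backend actually sees):

    /* extract2 dest, t1, t2, pos: take 64 bits out of the 128-bit
     * concatenation t2:t1, starting at bit pos (t2 is the high half). */
    static uint64_t extract2_64(uint64_t t1, uint64_t t2, unsigned pos)
    {
        return (t1 >> pos) | (t2 << (64 - pos));
    }

The tcg_gen_extract2_{i32,i64} expanders handle *pos* == 0 (a move of *t1*) and *pos* == N (a move of *t2*) themselves, which is why the backend never sees those boundary values.
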
DEF(divu2, 2, 3, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
DEF(extract, 1, 1, 2, TCG_OPF_INT)
+DEF(extract2, 1, 2, 1, TCG_OPF_INT)
DEF(movcond, 1, 4, 1, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
DEF(muls2, 2, 2, 0, TCG_OPF_INT)
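
For reference (not part of the patch): the DEF() arguments in tcg-opc.h are (name, output args, input args, constant args, flags), so DEF(extract2, 1, 2, 1, TCG_OPF_INT) declares one output (*dest*), two register inputs (*t1*, *t2*), one constant argument (*pos*), and marks the opcode as usable with both 32-bit and 64-bit integer types; it stands in for the separate extract2_i32/extract2_i64 definitions removed in the hunks below.
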
DEF(st8_i32, 0, 2, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
-/* shifts/rotates */
-DEF(extract2_i32, 1, 2, 1, 0)
DEF(add2_i32, 2, 4, 0, 0)
DEF(sub2_i32, 2, 4, 0, 0)
DEF(st16_i64, 0, 2, 1, 0)
DEF(st32_i64, 0, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
-/* shifts/rotates */
-DEF(extract2_i64, 1, 2, 1, 0)
/* size changing ops */
DEF(ext_i32_i64, 1, 1, 0, 0)
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-/*
- * Sometimes, knowing what the backend has can produce better code.
- * The exact opcode to check depends on 32- vs. 64-bit.
- */
-#ifdef TARGET_X86_64
-#define INDEX_op_extract2_tl INDEX_op_extract2_i64
-#else
-#define INDEX_op_extract2_tl INDEX_op_extract2_i32
-#endif
-
#define MMX_OFFSET(reg) \
({ assert((reg) >= 0 && (reg) <= 7); \
offsetof(CPUX86State, fpregs[reg].mmx); })
tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
while (vec_len > 8) {
vec_len -= 8;
- if (tcg_op_supported(INDEX_op_extract2_tl, TCG_TYPE_TL, 0)) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_TL, 0)) {
/*
* Load the next byte of the result into the high byte of T.
* TCG does a similar expansion of deposit to shl+extract2; by
uint64_t v2 = arg_info(op->args[2])->val;
int shr = op->args[3];
- if (op->opc == INDEX_op_extract2_i64) {
- v1 >>= shr;
- v2 <<= 64 - shr;
- } else {
+ if (ctx->type == TCG_TYPE_I32) {
v1 = (uint32_t)v1 >> shr;
v2 = (uint64_t)((int32_t)v2 << (32 - shr));
+ } else {
+ v1 >>= shr;
+ v2 <<= 64 - shr;
}
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
}
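
As a quick sanity check of the folded arithmetic (a worked example of mine, not from the patch): for TCG_TYPE_I64 with t1 = 0x0000000011223344, t2 = 0x00000000AABBCCDD and pos = 16, the fold yields (t1 >> 16) | (t2 << 48) = 0xCCDD000000001122, i.e. bits [16, 79] of the 128-bit concatenation t2:t1, matching the documented semantics above.
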
case INDEX_op_extract:
done = fold_extract(&ctx, op);
break;
- CASE_OP_32_64(extract2):
+ case INDEX_op_extract2:
done = fold_extract2(&ctx, op);
break;
case INDEX_op_ext_i32_i64:
t1 = tcg_temp_ebb_new_i32();
- if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
if (ofs + len == 32) {
tcg_gen_shli_i32(t1, arg1, len);
tcg_gen_extract2_i32(ret, t1, arg2, len);
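
The deposit expansion just above (and the shl+extract2 expansion mentioned in the x86 comment earlier) relies on a small identity, sketched here with logical shifts on uint32_t (illustration only; assumes 0 < len < 32 and the field at the top of the word, i.e. ofs = 32 - len):

    /* Sketch: deposit arg2 into the top len bits of arg1 (ofs = 32 - len),
     * expressed as a left shift followed by extract2.                      */
    static uint32_t deposit_top(uint32_t arg1, uint32_t arg2, unsigned len)
    {
        uint32_t t1 = arg1 << len;                  /* clear the replaced bits  */
        return (t1 >> len) | (arg2 << (32 - len));  /* extract2(t1, arg2, len)
                                                       == (arg1 & ((1u << (32 - len)) - 1))
                                                          | (arg2 << (32 - len)) */
    }
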
tcg_gen_mov_i32(ret, ah);
} else if (al == ah) {
tcg_gen_rotri_i32(ret, al, ofs);
- } else if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
+ } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
+ tcg_gen_op4i_i32(INDEX_op_extract2, ret, al, ah, ofs);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_shri_i32(t0, al, ofs);
tcg_gen_movi_i32(TCGV_LOW(ret), 0);
}
} else if (right) {
- if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
tcg_gen_extract2_i32(TCGV_LOW(ret),
TCGV_LOW(arg1), TCGV_HIGH(arg1), c);
} else {
tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
}
} else {
- if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
tcg_gen_extract2_i32(TCGV_HIGH(ret),
TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
} else {
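
In the 64-bit shift helper above, used when the host is 32-bit, the value is held as two 32-bit halves and only the half that needs bits from both sides goes through extract2. A sketch of the logical right-shift case (my illustration; assumes 0 < c < 32):

    /* Sketch: 64-bit logical right shift by c on a (lo, hi) register pair;
     * the new low half is exactly extract2(lo, hi, c).                     */
    static void shr64_pair(uint32_t *lo, uint32_t *hi, unsigned c)
    {
        *lo = (*lo >> c) | (*hi << (32 - c));   /* extract2(lo, hi, c) */
        *hi = *hi >> c;
    }
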
t1 = tcg_temp_ebb_new_i64();
- if (tcg_op_supported(INDEX_op_extract2_i64, TCG_TYPE_I64, 0)) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) {
if (ofs + len == 64) {
tcg_gen_shli_i64(t1, arg1, len);
tcg_gen_extract2_i64(ret, t1, arg2, len);
tcg_gen_mov_i64(ret, ah);
} else if (al == ah) {
tcg_gen_rotri_i64(ret, al, ofs);
- } else if (tcg_op_supported(INDEX_op_extract2_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
+ } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) {
+ tcg_gen_op4i_i64(INDEX_op_extract2, ret, al, ah, ofs);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_shri_i64(t0, al, ofs);
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
- OUTOP(INDEX_op_extract2_i32, TCGOutOpExtract2, outop_extract2),
- OUTOP(INDEX_op_extract2_i64, TCGOutOpExtract2, outop_extract2),
+ OUTOP(INDEX_op_extract2, TCGOutOpExtract2, outop_extract2),
OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
}
break;
- case INDEX_op_extract2_i32:
- case INDEX_op_extract2_i64:
+ case INDEX_op_extract2:
{
const TCGOutOpExtract2 *out = &outop_extract2;