VRRc_VCEQ = 0xe7f8, /* we leave the m5 cs field 0 */
VRRc_VCH = 0xe7fb, /* " */
VRRc_VCHL = 0xe7f9, /* " */
+ VRRc_VERLLV = 0xe773,
+ VRRc_VESLV = 0xe770,
+ VRRc_VESRAV = 0xe77a,
+ VRRc_VESRLV = 0xe778,
VRRc_VML = 0xe7a2,
VRRc_VN = 0xe768,
VRRc_VNC = 0xe769,
VRRc_VX = 0xe76d,
VRRf_VLVGP = 0xe762,
+ VRSa_VERLL = 0xe733,
+ VRSa_VESL = 0xe730,
+ VRSa_VESRA = 0xe73a,
+ VRSa_VESRL = 0xe738,
VRSb_VLVG = 0xe722,
VRSc_VLGV = 0xe721,
tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
}
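+/*
+ * Emit a VRS-a format insn: opcode high byte, V1, V3, B2, D2, M4, RXB,
+ * opcode low byte.  Used here for the element shift/rotate instructions,
+ * where the count is addressed by D2(B2) and M4 selects the element size.
+ */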
+static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1,
+ intptr_t d2, TCGReg b2, TCGReg v3, int m4)
+{
+ tcg_debug_assert(is_vector_reg(v1));
+ tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
+ tcg_debug_assert(is_general_reg(b2));
+ tcg_debug_assert(is_vector_reg(v3));
+ tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
+ tcg_out16(s, b2 << 12 | d2);
+ tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
+}
+
static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
intptr_t d2, TCGReg b2, TCGReg r3, int m4)
{
tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
break;
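+    /*
+     * Immediate shifts/rotates: the count is carried in the D2
+     * displacement field, with no base register (TCG_REG_NONE).
+     */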
+ case INDEX_op_shli_vec:
+ tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
+ break;
+ case INDEX_op_shri_vec:
+ tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece);
+ break;
+ case INDEX_op_sari_vec:
+ tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece);
+ break;
+ case INDEX_op_rotli_vec:
+ tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece);
+ break;
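+    /* Shifts/rotate by a scalar: the count comes from general register a2, with d2 = 0. */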
+ case INDEX_op_shls_vec:
+ tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece);
+ break;
+ case INDEX_op_shrs_vec:
+ tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece);
+ break;
+ case INDEX_op_sars_vec:
+ tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece);
+ break;
+ case INDEX_op_rotls_vec:
+ tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece);
+ break;
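+    /* Element-wise variable shifts: per-element counts come from vector a2. */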
+ case INDEX_op_shlv_vec:
+ tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece);
+ break;
+ case INDEX_op_shrv_vec:
+ tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece);
+ break;
+ case INDEX_op_sarv_vec:
+ tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece);
+ break;
+ case INDEX_op_rotlv_vec:
+ tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
+ break;
+
case INDEX_op_cmp_vec:
switch ((TCGCond)args[3]) {
case TCG_COND_EQ:
case INDEX_op_not_vec:
case INDEX_op_or_vec:
case INDEX_op_orc_vec:
+ case INDEX_op_rotli_vec:
+ case INDEX_op_rotls_vec:
+ case INDEX_op_rotlv_vec:
+ case INDEX_op_sari_vec:
+ case INDEX_op_sars_vec:
+ case INDEX_op_sarv_vec:
+ case INDEX_op_shli_vec:
+ case INDEX_op_shls_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shri_vec:
+ case INDEX_op_shrs_vec:
+ case INDEX_op_shrv_vec:
case INDEX_op_sub_vec:
case INDEX_op_xor_vec:
return 1;
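+    /* Returning -1 requests expansion via tcg_expand_vec_op.  */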
case INDEX_op_cmp_vec:
+ case INDEX_op_rotrv_vec:
return -1;
case INDEX_op_mul_vec:
return vece < MO_64;
TCGArg a0, ...)
{
va_list va;
- TCGv_vec v0, v1, v2;
+ TCGv_vec v0, v1, v2, t0;
va_start(va, a0);
v0 = temp_tcgv_vec(arg_temp(a0));
expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
break;
+ case INDEX_op_rotrv_vec:
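+        /*
+         * Right rotate by N is left rotate by -N; rotation is periodic
+         * in the element size, so negating the count vector suffices.
+         */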
+ t0 = tcg_temp_new_vec(type);
+ tcg_gen_neg_vec(vece, t0, v2);
+ tcg_gen_rotlv_vec(vece, v0, v1, t0);
+ tcg_temp_free_vec(t0);
+ break;
+
default:
g_assert_not_reached();
}
case INDEX_op_abs_vec:
case INDEX_op_neg_vec:
case INDEX_op_not_vec:
+ case INDEX_op_rotli_vec:
+ case INDEX_op_sari_vec:
+ case INDEX_op_shli_vec:
+ case INDEX_op_shri_vec:
return C_O1_I1(v, v);
case INDEX_op_add_vec:
case INDEX_op_sub_vec:
case INDEX_op_xor_vec:
case INDEX_op_cmp_vec:
case INDEX_op_mul_vec:
+ case INDEX_op_rotlv_vec:
+ case INDEX_op_rotrv_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
return C_O1_I2(v, v, v);
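+    /* The scalar-count shifts/rotate take the count in a general register.  */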
+ case INDEX_op_rotls_vec:
+ case INDEX_op_shls_vec:
+ case INDEX_op_shrs_vec:
+ case INDEX_op_sars_vec:
+ return C_O1_I2(v, v, r);
default:
g_assert_not_reached();