A few (7) new IROps are introduced.
Patch by Christian Borntraeger (borntraeger@de.ibm.com).
Fixes bugzilla #274695.
git-svn-id: svn://svn.valgrind.org/vex/trunk@2496
S390_CC_OP_BFP_TDC_32 = 32,
S390_CC_OP_BFP_TDC_64 = 33,
S390_CC_OP_BFP_TDC_128 = 34,
- S390_CC_OP_SET = 35
+ S390_CC_OP_SET = 35,
+ S390_CC_OP_BFP_32_TO_UINT_32 = 36,
+ S390_CC_OP_BFP_64_TO_UINT_32 = 37,
+ S390_CC_OP_BFP_128_TO_UINT_32 = 38,
+ S390_CC_OP_BFP_32_TO_UINT_64 = 39,
+ S390_CC_OP_BFP_64_TO_UINT_64 = 40,
+ S390_CC_OP_BFP_128_TO_UINT_64 = 41
};
/*------------------------------------------------------------*/
| S390_CC_OP_BFP_TDC_64 | F value | Z class | |
| S390_CC_OP_BFP_TDC_128 | F value hi 64 bits | F value low 64 bits | Z class |
| S390_CC_OP_SET | Z condition code | | |
+ | S390_CC_OP_BFP_32_TO_UINT_32 | F source | | |
+ | S390_CC_OP_BFP_64_TO_UINT_32 | F source | | |
+ | S390_CC_OP_BFP_128_TO_UINT_32 | F source hi 64 bits | F source low 64 bits | |
+ | S390_CC_OP_BFP_32_TO_UINT_64 | F source | | |
+ | S390_CC_OP_BFP_64_TO_UINT_64 | F source | | |
+ | S390_CC_OP_BFP_128_TO_UINT_64 | F source hi 64 bits | F source low 64 bits | |
+--------------------------------+-----------------------+----------------------+-------------+
*/
psw >> 28; /* cc */ \
})
+/* Compute the condition code that a BFP -> unsigned integer conversion
+   produces, by executing the very same conversion insn ("opcode" is an
+   .insn rrf template) on the host.  cc_dep1 holds the FP source value;
+   the integer result goes to r0 and is discarded (hence the r0 clobber).
+   The CC is read back from the PSW via IPM and returned in the low bits
+   after the shift. */
+#define S390_CC_FOR_BFP_UCONVERT(opcode,cc_dep1) \
+({ \
+ __asm__ volatile ( \
+ opcode ",0,%[op],0,0\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [op] "f"(cc_dep1) \
+ : "cc", "r0");\
+ psw >> 28; /* cc */ \
+})
+
#define S390_CC_FOR_BFP128_CONVERT(opcode,hi,lo) \
({ \
__asm__ volatile ( \
psw >> 28; /* cc */ \
})
+/* As S390_CC_FOR_BFP_UCONVERT, but for a 128-bit BFP source.  The two
+   64-bit halves (hi, lo) are first loaded into the f4/f6 register pair
+   -- the pair the .insn template reads -- then the conversion is run on
+   the host and the resulting CC extracted via IPM. */
+#define S390_CC_FOR_BFP128_UCONVERT(opcode,hi,lo) \
+({ \
+ __asm__ volatile ( \
+ "ldr 4,%[high]\n\t" \
+ "ldr 6,%[low]\n\t" \
+ opcode ",0,4,0,0\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [high] "f"(hi), [low] "f"(lo) \
+ : "cc", "r0", "f4", "f6");\
+ psw >> 28; /* cc */ \
+})
+
#define S390_CC_FOR_BFP_TDC(opcode,cc_dep1,cc_dep2) \
({ \
__asm__ volatile ( \
case S390_CC_OP_SET:
return cc_dep1;
+ case S390_CC_OP_BFP_32_TO_UINT_32:
+ return S390_CC_FOR_BFP_UCONVERT(".insn rrf,0xb39c0000", cc_dep1);
+
+ case S390_CC_OP_BFP_64_TO_UINT_32:
+ return S390_CC_FOR_BFP_UCONVERT(".insn rrf,0xb39d0000", cc_dep1);
+
+ case S390_CC_OP_BFP_128_TO_UINT_32:
+ return S390_CC_FOR_BFP128_UCONVERT(".insn rrf,0xb39e0000", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_BFP_32_TO_UINT_64:
+ return S390_CC_FOR_BFP_UCONVERT(".insn rrf,0xb3ac0000", cc_dep1);
+
+ case S390_CC_OP_BFP_64_TO_UINT_64:
+ return S390_CC_FOR_BFP_UCONVERT(".insn rrf,0xb3ad0000", cc_dep1);
+
+ case S390_CC_OP_BFP_128_TO_UINT_64:
+ return S390_CC_FOR_BFP128_UCONVERT(".insn rrf,0xb3ae0000", cc_dep1, cc_dep2);
+
+
default:
break;
}
s390_disasm(ENC4(MNM, FPR, FPR, FPR), mnm, r1, r3, r2);
}
+/* Dispatch an RRF-format insn whose operands are (m3, m4, r1, r2) with
+   r1 naming an FPR and r2 a GPR (uint -> float conversions such as
+   CELFBR).  The IR generator always runs -- it emits the IR side
+   effects -- and the returned mnemonic is only used when front-end
+   tracing is enabled. */
+static void
+s390_format_RRF_UUFR(HChar *(*irgen)(UChar m3, UChar m4, UChar r1, UChar r2),
+                     UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   HChar *mnm = irgen(m3, m4, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), mnm, r1, m3, r2, m4);
+}
+
+/* Dispatch an RRF-format insn whose operands are (m3, m4, r1, r2) with
+   r1 naming a GPR and r2 an FPR (float -> uint conversions such as
+   CLFEBR).  Mirror image of s390_format_RRF_UUFR: irgen runs
+   unconditionally, disassembly only under tracing. */
+static void
+s390_format_RRF_UURF(HChar *(*irgen)(UChar m3, UChar m4, UChar r1, UChar r2),
+                     UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   HChar *mnm = irgen(m3, m4, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), mnm, r1, m3, r2, m4);
+}
+
+
static void
s390_format_RRF_U0RR(HChar *(*irgen)(UChar m3, UChar r1, UChar r2),
UChar m3, UChar r1, UChar r2, Int xmnm_kind)
return "cdgbr";
}
+/* CELFBR: convert the 32-bit unsigned int in GPR r2 (word 1) to a
+   short BFP value in FPR r1, rounding per m3.  No CC is set by this
+   insn, so no thunk update here. */
+static HChar *
+s390_irgen_CELFBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   put_fpr_w0(r1, binop(Iop_I32UtoF32, mkU32(encode_rounding_mode(m3)),
+                        mkexpr(op2)));
+
+   return "celfbr";
+}
+
+/* CDLFBR: convert the 32-bit unsigned int in GPR r2 to a long BFP
+   value in FPR r1.  Iop_I32UtoF64 is unary -- a u32 -> f64 conversion
+   needs no rounding mode -- so m3 is ignored here. */
+static HChar *
+s390_irgen_CDLFBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   put_fpr_dw0(r1, unop(Iop_I32UtoF64, mkexpr(op2)));
+
+   return "cdlfbr";
+}
+
+/* CELGBR: convert the 64-bit unsigned int in GPR r2 to a short BFP
+   value in FPR r1, rounding per m3 (u64 -> f32 can be inexact). */
+static HChar *
+s390_irgen_CELGBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   put_fpr_w0(r1, binop(Iop_I64UtoF32, mkU32(encode_rounding_mode(m3)),
+                        mkexpr(op2)));
+
+   return "celgbr";
+}
+
+/* CDLGBR: convert the 64-bit unsigned int in GPR r2 to a long BFP
+   value in FPR r1, rounding per m3 (u64 -> f64 can be inexact). */
+static HChar *
+s390_irgen_CDLGBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   put_fpr_dw0(r1, binop(Iop_I64UtoF64, mkU32(encode_rounding_mode(m3)),
+                         mkexpr(op2)));
+
+   return "cdlgbr";
+}
+
+/* CLFEBR: convert the short BFP value in FPR r2 to a 32-bit unsigned
+   int in GPR r1 (word 1), rounding per m3.  The insn sets the CC, so
+   record the FP source in the thunk under S390_CC_OP_BFP_32_TO_UINT_32
+   for later CC recomputation. */
+static HChar *
+s390_irgen_CLFEBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op, get_fpr_w0(r2));
+   assign(result, binop(Iop_F32toI32U, mkU32(encode_rounding_mode(m3)),
+                        mkexpr(op)));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_32_TO_UINT_32, op);
+
+   return "clfebr";
+}
+
+/* CLFDBR: convert the long BFP value in FPR r2 to a 32-bit unsigned
+   int in GPR r1 (word 1), rounding per m3; CC thunk records the F64
+   source. */
+static HChar *
+s390_irgen_CLFDBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op, get_fpr_dw0(r2));
+   assign(result, binop(Iop_F64toI32U, mkU32(encode_rounding_mode(m3)),
+                        mkexpr(op)));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_64_TO_UINT_32, op);
+
+   return "clfdbr";
+}
+
+/* CLGEBR: convert the short BFP value in FPR r2 to a 64-bit unsigned
+   int in GPR r1, rounding per m3; CC thunk records the F32 source. */
+static HChar *
+s390_irgen_CLGEBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op, get_fpr_w0(r2));
+   assign(result, binop(Iop_F32toI64U, mkU32(encode_rounding_mode(m3)),
+                        mkexpr(op)));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_32_TO_UINT_64, op);
+
+   return "clgebr";
+}
+
+/* CLGDBR: convert the long BFP value in FPR r2 to a 64-bit unsigned
+   int in GPR r1, rounding per m3; CC thunk records the F64 source. */
+static HChar *
+s390_irgen_CLGDBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op, get_fpr_dw0(r2));
+   assign(result, binop(Iop_F64toI64U, mkU32(encode_rounding_mode(m3)),
+                        mkexpr(op)));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_64_TO_UINT_64, op);
+
+   return "clgdbr";
+}
+
static HChar *
s390_irgen_CFEBR(UChar r3, UChar r1, UChar r2)
{
return "cxfbr";
}
+/* CXLFBR: convert the 32-bit unsigned int in GPR r2 to an extended
+   (128-bit) BFP value in the FPR pair starting at r1.  Iop_I32UtoF128
+   is unary -- the widening is exact, so m3 is not consulted. */
+static HChar *
+s390_irgen_CXLFBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   put_fpr_pair(r1, unop(Iop_I32UtoF128, mkexpr(op2)));
+
+   return "cxlfbr";
+}
+
+
+
+
static HChar *
s390_irgen_CXGBR(UChar r1, UChar r2)
{
return "cxgbr";
}
+/* CXLGBR: convert the 64-bit unsigned int in GPR r2 to an extended
+   (128-bit) BFP value in the FPR pair starting at r1.  Exact widening,
+   so the unary IROp needs no rounding mode and m3 is not consulted. */
+static HChar *
+s390_irgen_CXLGBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   put_fpr_pair(r1, unop(Iop_I64UtoF128, mkexpr(op2)));
+
+   return "cxlgbr";
+}
+
static HChar *
s390_irgen_CFXBR(UChar r3, UChar r1, UChar r2)
{
return "cfxbr";
}
+/* CLFXBR: convert the extended BFP value in the FPR pair at r2 to a
+   32-bit unsigned int in GPR r1 (word 1), rounding per m3; the 128-bit
+   source goes into the thunk via s390_cc_thunk_put1f128. */
+static HChar *
+s390_irgen_CLFXBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F128);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op, get_fpr_pair(r2));
+   assign(result, binop(Iop_F128toI32U, mkU32(encode_rounding_mode(m3)),
+                        mkexpr(op)));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_put1f128(S390_CC_OP_BFP_128_TO_UINT_32, op);
+
+   return "clfxbr";
+}
+
+
+
+
static HChar *
s390_irgen_CGXBR(UChar r3, UChar r1, UChar r2)
{
return "cgxbr";
}
+/* CLGXBR: convert the extended BFP value in the FPR pair at r2 to a
+   64-bit unsigned int in GPR r1, rounding per m3; the 128-bit source
+   goes into the thunk via s390_cc_thunk_put1f128. */
+static HChar *
+s390_irgen_CLGXBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F128);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op, get_fpr_pair(r2));
+   assign(result, binop(Iop_F128toI64U, mkU32(encode_rounding_mode(m3)),
+                        mkexpr(op)));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_put1f128(S390_CC_OP_BFP_128_TO_UINT_64, op);
+
+   return "clgxbr";
+}
+
static HChar *
s390_irgen_DXBR(UChar r1, UChar r2)
{
} RRF;
struct {
unsigned int op : 16;
- unsigned int r3 : 4;
+ unsigned int m3 : 4;
unsigned int m4 : 4;
unsigned int r1 : 4;
unsigned int r2 : 4;
case 0xb384: s390_format_RRE_R0(s390_irgen_SFPC, ovl.fmt.RRE.r1); goto ok;
case 0xb385: /* SFASR */ goto unimplemented;
case 0xb38c: s390_format_RRE_R0(s390_irgen_EFPC, ovl.fmt.RRE.r1); goto ok;
- case 0xb390: /* CELFBR */ goto unimplemented;
- case 0xb391: /* CDLFBR */ goto unimplemented;
- case 0xb392: /* CXLFBR */ goto unimplemented;
+ case 0xb390: s390_format_RRF_UUFR(s390_irgen_CELFBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb391: s390_format_RRF_UUFR(s390_irgen_CDLFBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb392: s390_format_RRF_UUFR(s390_irgen_CXLFBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
case 0xb394: s390_format_RRE_FR(s390_irgen_CEFBR, ovl.fmt.RRE.r1,
ovl.fmt.RRE.r2); goto ok;
case 0xb395: s390_format_RRE_FR(s390_irgen_CDFBR, ovl.fmt.RRE.r1,
case 0xb39a: s390_format_RRF_U0RF(s390_irgen_CFXBR, ovl.fmt.RRF3.r3,
ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);
goto ok;
- case 0xb3a0: /* CELGBR */ goto unimplemented;
- case 0xb3a1: /* CDLGBR */ goto unimplemented;
- case 0xb3a2: /* CXLGBR */ goto unimplemented;
+ case 0xb39c: s390_format_RRF_UURF(s390_irgen_CLFEBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb39d: s390_format_RRF_UURF(s390_irgen_CLFDBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb39e: s390_format_RRF_UURF(s390_irgen_CLFXBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb3a0: s390_format_RRF_UUFR(s390_irgen_CELGBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb3a1: s390_format_RRF_UUFR(s390_irgen_CDLGBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb3a2: s390_format_RRF_UUFR(s390_irgen_CXLGBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
case 0xb3a4: s390_format_RRE_FR(s390_irgen_CEGBR, ovl.fmt.RRE.r1,
ovl.fmt.RRE.r2); goto ok;
case 0xb3a5: s390_format_RRE_FR(s390_irgen_CDGBR, ovl.fmt.RRE.r1,
case 0xb3aa: s390_format_RRF_U0RF(s390_irgen_CGXBR, ovl.fmt.RRF3.r3,
ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);
goto ok;
+ case 0xb3ac: s390_format_RRF_UURF(s390_irgen_CLGEBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb3ad: s390_format_RRF_UURF(s390_irgen_CLGDBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb3ae: s390_format_RRF_UURF(s390_irgen_CLGXBR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
case 0xb3b4: /* CEFR */ goto unimplemented;
case 0xb3b5: /* CDFR */ goto unimplemented;
case 0xb3b6: /* CXFR */ goto unimplemented;
}
+/* Assemble a 4-byte RRF-format insn: "op" carries the 16-bit opcode in
+   the high halfword; m3, m4, r1, r2 fill instruction bits 16-19,
+   20-23, 24-27, 28-31 respectively (hence the 12/8/4/0 shifts into the
+   low halfword). */
+static UChar *
+emit_RRF2(UChar *p, UInt op, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)m3) << 12;
+   the_insn |= ((ULong)m4) << 8;
+   the_insn |= ((ULong)r1) << 4;
+   the_insn |= ((ULong)r2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+
+
static UChar *
emit_RRF3(UChar *p, UInt op, UChar r3, UChar r1, UChar r2)
{
}
+/* Emit CELFBR (opcode 0xb390): u32 in GPR r2 -> short BFP in FPR r1. */
+static UChar *
+s390_emit_CELFBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "celfbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3900000, m3, m4, r1, r2);
+}
+
+
+/* Emit CDLFBR (opcode 0xb391): u32 in GPR r2 -> long BFP in FPR r1. */
+static UChar *
+s390_emit_CDLFBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdlfbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3910000, m3, m4, r1, r2);
+}
+
+
+/* Emit CXLFBR (opcode 0xb392): u32 in GPR r2 -> extended BFP in FPR pair r1. */
+static UChar *
+s390_emit_CXLFBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxlfbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3920000, m3, m4, r1, r2);
+}
+
+
+/* Emit CELGBR (opcode 0xb3a0): u64 in GPR r2 -> short BFP in FPR r1. */
+static UChar *
+s390_emit_CELGBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "celgbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3a00000, m3, m4, r1, r2);
+}
+
+
+/* Emit CDLGBR (opcode 0xb3a1): u64 in GPR r2 -> long BFP in FPR r1. */
+static UChar *
+s390_emit_CDLGBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdlgbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3a10000, m3, m4, r1, r2);
+}
+
+
+/* Emit CXLGBR (opcode 0xb3a2): u64 in GPR r2 -> extended BFP in FPR pair r1. */
+static UChar *
+s390_emit_CXLGBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxlgbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3a20000, m3, m4, r1, r2);
+}
+
+
+/* Emit CLFEBR (opcode 0xb39c): short BFP in FPR r2 -> u32 in GPR r1.
+   Note the GPR/FPR operand order is flipped vs. the uint->float emitters. */
+static UChar *
+s390_emit_CLFEBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfebr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb39c0000, m3, m4, r1, r2);
+}
+
+
+/* Emit CLFDBR (opcode 0xb39d): long BFP in FPR r2 -> u32 in GPR r1. */
+static UChar *
+s390_emit_CLFDBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfdbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb39d0000, m3, m4, r1, r2);
+}
+
+
+/* Emit CLFXBR (opcode 0xb39e): extended BFP in FPR pair r2 -> u32 in GPR r1. */
+static UChar *
+s390_emit_CLFXBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfxbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb39e0000, m3, m4, r1, r2);
+}
+
+
+/* Emit CLGEBR (opcode 0xb3ac): short BFP in FPR r2 -> u64 in GPR r1. */
+static UChar *
+s390_emit_CLGEBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgebr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3ac0000, m3, m4, r1, r2);
+}
+
+
+/* Emit CLGDBR (opcode 0xb3ad): long BFP in FPR r2 -> u64 in GPR r1. */
+static UChar *
+s390_emit_CLGDBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgdbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3ad0000, m3, m4, r1, r2);
+}
+
+
+/* Emit CLGXBR (opcode 0xb3ae): extended BFP in FPR pair r2 -> u64 in GPR r1. */
+static UChar *
+s390_emit_CLGXBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgxbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3ae0000, m3, m4, r1, r2);
+}
+
+
static UChar *
s390_emit_CFEBR(UChar *p, UChar r3, UChar r1, UChar r2)
{
case S390_BFP_I64_TO_F32:
case S390_BFP_I64_TO_F64:
case S390_BFP_I64_TO_F128: op = "v-i2f"; break;
+ case S390_BFP_U32_TO_F32:
+ case S390_BFP_U32_TO_F64:
+ case S390_BFP_U32_TO_F128:
+ case S390_BFP_U64_TO_F32:
+ case S390_BFP_U64_TO_F64:
+ case S390_BFP_U64_TO_F128: op = "v-u2f"; break;
case S390_BFP_F32_TO_I32:
case S390_BFP_F32_TO_I64:
case S390_BFP_F64_TO_I32:
case S390_BFP_F64_TO_I64:
case S390_BFP_F128_TO_I32:
case S390_BFP_F128_TO_I64: op = "v-f2i"; break;
+ case S390_BFP_F32_TO_U32:
+ case S390_BFP_F32_TO_U64:
+ case S390_BFP_F64_TO_U32:
+ case S390_BFP_F64_TO_U64:
+ case S390_BFP_F128_TO_U32:
+ case S390_BFP_F128_TO_U64: op = "v-f2u"; break;
case S390_BFP_F32_TO_F64:
case S390_BFP_F32_TO_F128:
case S390_BFP_F64_TO_F32:
case S390_BFP_SQRT: op = "v-fsqrt"; break;
case S390_BFP_I32_TO_F128:
case S390_BFP_I64_TO_F128: op = "v-i2f"; break;
+ case S390_BFP_U32_TO_F128:
+ case S390_BFP_U64_TO_F128: op = "v-u2f"; break;
case S390_BFP_F128_TO_I32:
case S390_BFP_F128_TO_I64: op = "v-f2i"; break;
+ case S390_BFP_F128_TO_U32:
+ case S390_BFP_F128_TO_U64: op = "v-f2u"; break;
case S390_BFP_F32_TO_F128:
case S390_BFP_F64_TO_F128:
case S390_BFP_F128_TO_F32:
case S390_BFP_I32_TO_F32:
case S390_BFP_I32_TO_F64:
case S390_BFP_I32_TO_F128:
+ case S390_BFP_U32_TO_F32:
+ case S390_BFP_U32_TO_F64:
+ case S390_BFP_U32_TO_F128:
case S390_BFP_F32_TO_I32:
case S390_BFP_F32_TO_I64:
+ case S390_BFP_F32_TO_U32:
+ case S390_BFP_F32_TO_U64:
case S390_BFP_F32_TO_F64:
case S390_BFP_F32_TO_F128: p += vex_sprintf(p, "4 -> "); goto common;
case S390_BFP_I64_TO_F32:
case S390_BFP_I64_TO_F64:
case S390_BFP_I64_TO_F128:
+ case S390_BFP_U64_TO_F32:
+ case S390_BFP_U64_TO_F64:
+ case S390_BFP_U64_TO_F128:
case S390_BFP_F64_TO_I32:
case S390_BFP_F64_TO_I64:
+ case S390_BFP_F64_TO_U32:
+ case S390_BFP_F64_TO_U64:
case S390_BFP_F64_TO_F32:
case S390_BFP_F64_TO_F128: p += vex_sprintf(p, "8 -> "); goto common;
case S390_BFP_F128_TO_I32:
case S390_BFP_F128_TO_I64:
+ case S390_BFP_F128_TO_U32:
+ case S390_BFP_F128_TO_U64:
case S390_BFP_F128_TO_F32:
case S390_BFP_F128_TO_F64: p += vex_sprintf(p, "16 -> "); goto common;
default:
case S390_INSN_BFP128_CONVERT_FROM:
switch (insn->variant.bfp128_unop.tag) {
case S390_BFP_I32_TO_F128:
+ case S390_BFP_U32_TO_F128:
case S390_BFP_F32_TO_F128: p += vex_sprintf(p, "4 -> "); goto common;
case S390_BFP_I64_TO_F128:
+ case S390_BFP_U64_TO_F128:
case S390_BFP_F64_TO_F128: p += vex_sprintf(p, "8 -> "); goto common;
case S390_BFP_F128_TO_I32:
case S390_BFP_F128_TO_I64:
+ case S390_BFP_F128_TO_U32:
+ case S390_BFP_F128_TO_U64:
case S390_BFP_F128_TO_F32:
case S390_BFP_F128_TO_F64: p += vex_sprintf(p, "16 -> "); goto common;
default:
case S390_BFP_F64_TO_I32: return s390_emit_CFDBR(buf, m3, r1, r2);
case S390_BFP_F32_TO_I64: return s390_emit_CGEBR(buf, m3, r1, r2);
case S390_BFP_F64_TO_I64: return s390_emit_CGDBR(buf, m3, r1, r2);
+
+ /* We leave m4 as 0 - as gcc */
+ case S390_BFP_F32_TO_U32: return s390_emit_CLFEBR(buf, m3, 0, r1, r2);
+ case S390_BFP_F64_TO_U32: return s390_emit_CLFDBR(buf, m3, 0, r1, r2);
+ case S390_BFP_F32_TO_U64: return s390_emit_CLGEBR(buf, m3, 0, r1, r2);
+ case S390_BFP_F64_TO_U64: return s390_emit_CLGDBR(buf, m3, 0, r1, r2);
+
default: break;
}
case S390_BFP_I64_TO_F64: buf = s390_emit_CDGBR(buf, r1, r2); break;
case S390_BFP_I64_TO_F128: buf = s390_emit_CXGBR(buf, r1, r2); break;
+ /* We leave m4 as 0 - as gcc */
+ case S390_BFP_U32_TO_F32: buf = s390_emit_CELFBR(buf, m3, 0, r1, r2); break;
+ case S390_BFP_U32_TO_F64: buf = s390_emit_CDLFBR(buf, m3, 0, r1, r2); break;
+ case S390_BFP_U32_TO_F128: buf = s390_emit_CXLFBR(buf, m3, 0, r1, r2); break;
+ case S390_BFP_U64_TO_F32: buf = s390_emit_CELGBR(buf, m3, 0, r1, r2); break;
+ case S390_BFP_U64_TO_F64: buf = s390_emit_CDLGBR(buf, m3, 0, r1, r2); break;
+ case S390_BFP_U64_TO_F128: buf = s390_emit_CXLGBR(buf, m3, 0, r1, r2); break;
+
case S390_BFP_F32_TO_F64: buf = s390_emit_LDEBR(buf, r1, r2); break;
case S390_BFP_F32_TO_F128: buf = s390_emit_LXEBR(buf, r1, r2); break;
case S390_BFP_F64_TO_F32: buf = s390_emit_LEDBR(buf, r1, r2); break;
switch (insn->variant.bfp128_unop.tag) {
case S390_BFP_I32_TO_F128: buf = s390_emit_CXFBR(buf, r1_hi, r2); break;
case S390_BFP_I64_TO_F128: buf = s390_emit_CXGBR(buf, r1_hi, r2); break;
+ /* Rounding makes no sense -> m3 == 0. m4 is also 0 */
+ case S390_BFP_U32_TO_F128: buf = s390_emit_CXLFBR(buf, 0, 0, r1_hi, r2);
+ break;
+ case S390_BFP_U64_TO_F128: buf = s390_emit_CXLGBR(buf, 0, 0, r1_hi, r2);
+ break;
case S390_BFP_F32_TO_F128: buf = s390_emit_LXEBR(buf, r1_hi, r2); break;
case S390_BFP_F64_TO_F128: buf = s390_emit_LXDBR(buf, r1_hi, r2); break;
default: goto fail;
case S390_BFP_F128_TO_I64:
return s390_emit_CGXBR(buf, rounding_mode, r1, r2_hi);
+ case S390_BFP_F128_TO_U32:
+ return s390_emit_CLFXBR(buf, rounding_mode, 0, r1, r2_hi);
+
+ case S390_BFP_F128_TO_U64:
+ return s390_emit_CLGXBR(buf, rounding_mode, 0, r1, r2_hi);
+
default: break;
}
S390_BFP_I64_TO_F32,
S390_BFP_I64_TO_F64,
S390_BFP_I64_TO_F128,
+ S390_BFP_U32_TO_F32,
+ S390_BFP_U32_TO_F64,
+ S390_BFP_U32_TO_F128,
+ S390_BFP_U64_TO_F32,
+ S390_BFP_U64_TO_F64,
+ S390_BFP_U64_TO_F128,
S390_BFP_F32_TO_I32,
S390_BFP_F32_TO_I64,
+ S390_BFP_F32_TO_U32,
+ S390_BFP_F32_TO_U64,
S390_BFP_F32_TO_F64,
S390_BFP_F32_TO_F128,
S390_BFP_F64_TO_I32,
S390_BFP_F64_TO_I64,
+ S390_BFP_F64_TO_U32,
+ S390_BFP_F64_TO_U64,
S390_BFP_F64_TO_F32,
S390_BFP_F64_TO_F128,
S390_BFP_F128_TO_I32,
S390_BFP_F128_TO_I64,
+ S390_BFP_F128_TO_U32,
+ S390_BFP_F128_TO_U64,
S390_BFP_F128_TO_F32,
S390_BFP_F128_TO_F64
} s390_bfp_unop_t;
case Iop_F32toI32S: bfpop = S390_BFP_F32_TO_I32; goto do_convert;
case Iop_F32toI64S: bfpop = S390_BFP_F32_TO_I64; goto do_convert;
+ case Iop_F32toI32U: bfpop = S390_BFP_F32_TO_U32; goto do_convert;
+ case Iop_F32toI64U: bfpop = S390_BFP_F32_TO_U64; goto do_convert;
case Iop_F64toI32S: bfpop = S390_BFP_F64_TO_I32; goto do_convert;
case Iop_F64toI64S: bfpop = S390_BFP_F64_TO_I64; goto do_convert;
+ case Iop_F64toI32U: bfpop = S390_BFP_F64_TO_U32; goto do_convert;
+ case Iop_F64toI64U: bfpop = S390_BFP_F64_TO_U64; goto do_convert;
case Iop_F128toI32S: bfpop = S390_BFP_F128_TO_I32; goto do_convert_128;
case Iop_F128toI64S: bfpop = S390_BFP_F128_TO_I64; goto do_convert_128;
+ case Iop_F128toI32U: bfpop = S390_BFP_F128_TO_U32; goto do_convert_128;
+ case Iop_F128toI64U: bfpop = S390_BFP_F128_TO_U64; goto do_convert_128;
do_convert: {
s390_round_t rounding_mode;
case Iop_AbsF128: bfpop = S390_BFP_ABS; goto float128_opnd;
case Iop_I32StoF128: bfpop = S390_BFP_I32_TO_F128; goto convert_int;
case Iop_I64StoF128: bfpop = S390_BFP_I64_TO_F128; goto convert_int;
+ case Iop_I32UtoF128: bfpop = S390_BFP_U32_TO_F128; goto convert_int;
+ case Iop_I64UtoF128: bfpop = S390_BFP_U64_TO_F128; goto convert_int;
case Iop_F32toF128: bfpop = S390_BFP_F32_TO_F128; goto convert_float;
case Iop_F64toF128: bfpop = S390_BFP_F64_TO_F128; goto convert_float;
default:
break;
case Iop_I32StoF32: bfpop = S390_BFP_I32_TO_F32; break;
+ case Iop_I32UtoF32: bfpop = S390_BFP_U32_TO_F32; break;
case Iop_I64StoF32: bfpop = S390_BFP_I64_TO_F32; break;
case Iop_I64StoF64: bfpop = S390_BFP_I64_TO_F64; break;
+ case Iop_I64UtoF32: bfpop = S390_BFP_U64_TO_F32; break;
+ case Iop_I64UtoF64: bfpop = S390_BFP_U64_TO_F64; break;
+
default:
goto irreducible;
case Iop_AbsF32:
case Iop_AbsF64: bfpop = S390_BFP_ABS; break;
case Iop_I32StoF64: bfpop = S390_BFP_I32_TO_F64; break;
+ case Iop_I32UtoF64: bfpop = S390_BFP_U32_TO_F64; break;
case Iop_F32toF64: bfpop = S390_BFP_F32_TO_F64; break;
default:
goto irreducible;
}
/* Process operand */
- if (op == Iop_I32StoF64)
+ if (op == Iop_I32StoF64 || op == Iop_I32UtoF64)
h1 = s390_isel_int_expr(env, left);
else if (bfpop == S390_BFP_NABS)
h1 = s390_isel_float_expr(env, left->Iex.Unop.arg);
case Iop_F128LOtoF64: vex_printf("F128LOtoF64"); return;
case Iop_I32StoF128: vex_printf("I32StoF128"); return;
case Iop_I64StoF128: vex_printf("I64StoF128"); return;
+ case Iop_I32UtoF128: vex_printf("I32UtoF128"); return;
+ case Iop_I64UtoF128: vex_printf("I64UtoF128"); return;
case Iop_F128toI32S: vex_printf("F128toI32S"); return;
case Iop_F128toI64S: vex_printf("F128toI64S"); return;
+ case Iop_F128toI32U: vex_printf("F128toI32U"); return;
+ case Iop_F128toI64U: vex_printf("F128toI64U"); return;
case Iop_F32toF128: vex_printf("F32toF128"); return;
case Iop_F64toF128: vex_printf("F64toF128"); return;
case Iop_F128toF64: vex_printf("F128toF64"); return;
case Iop_F64toI32S: vex_printf("F64toI32S"); return;
case Iop_F64toI64S: vex_printf("F64toI64S"); return;
case Iop_F64toI64U: vex_printf("F64toI64U"); return;
+ case Iop_F32toI32U: vex_printf("F32toI32U"); return;
+ case Iop_F32toI64U: vex_printf("F32toI64U"); return;
case Iop_F64toI32U: vex_printf("F64toI32U"); return;
case Iop_I32StoF64: vex_printf("I32StoF64"); return;
case Iop_I64StoF64: vex_printf("I64StoF64"); return;
case Iop_I64UtoF64: vex_printf("I64UtoF64"); return;
+ case Iop_I32UtoF32: vex_printf("I32UtoF32"); return;
case Iop_I64UtoF32: vex_printf("I64UtoF32"); return;
case Iop_I32UtoF64: vex_printf("I32UtoF64"); return;
case Iop_F32toI32S: BINARY(ity_RMode,Ity_F32, Ity_I32);
case Iop_F32toI64S: BINARY(ity_RMode,Ity_F32, Ity_I64);
+ case Iop_F32toI32U: BINARY(ity_RMode,Ity_F32, Ity_I32);
+ case Iop_F32toI64U: BINARY(ity_RMode,Ity_F32, Ity_I64);
+ case Iop_I32UtoF32: BINARY(ity_RMode,Ity_I32, Ity_F32);
case Iop_I32StoF32: BINARY(ity_RMode,Ity_I32, Ity_F32);
case Iop_I64StoF32: BINARY(ity_RMode,Ity_I64, Ity_F32);
case Iop_I32StoF128: UNARY(Ity_I32, Ity_F128);
case Iop_I64StoF128: UNARY(Ity_I64, Ity_F128);
+ case Iop_I32UtoF128: UNARY(Ity_I32, Ity_F128);
+ case Iop_I64UtoF128: UNARY(Ity_I64, Ity_F128);
+
case Iop_F128toI32S: BINARY(ity_RMode,Ity_F128, Ity_I32);
case Iop_F128toI64S: BINARY(ity_RMode,Ity_F128, Ity_I64);
+ case Iop_F128toI32U: BINARY(ity_RMode,Ity_F128, Ity_I32);
+ case Iop_F128toI64U: BINARY(ity_RMode,Ity_F128, Ity_I64);
+
case Iop_F32toF128: UNARY(Ity_F32, Ity_F128);
case Iop_F64toF128: UNARY(Ity_F64, Ity_F128);
Iop_I64UtoF64, /* IRRoundingMode(I32) x unsigned I64 -> F64 */
Iop_I64UtoF32, /* IRRoundingMode(I32) x unsigned I64 -> F32 */
+ Iop_I32UtoF32, /* IRRoundingMode(I32) x unsigned I32 -> F32 */
Iop_I32UtoF64, /* unsigned I32 -> F64 */
Iop_F32toI32S, /* IRRoundingMode(I32) x F32 -> signed I32 */
Iop_F32toI64S, /* IRRoundingMode(I32) x F32 -> signed I64 */
+ Iop_F32toI32U, /* IRRoundingMode(I32) x F32 -> unsigned I32 */
+ Iop_F32toI64U, /* IRRoundingMode(I32) x F32 -> unsigned I64 */
Iop_I32StoF32, /* IRRoundingMode(I32) x signed I32 -> F32 */
Iop_I64StoF32, /* IRRoundingMode(I32) x signed I64 -> F32 */
Iop_I32StoF128, /* signed I32 -> F128 */
Iop_I64StoF128, /* signed I64 -> F128 */
+ Iop_I32UtoF128, /* unsigned I32 -> F128 */
+ Iop_I64UtoF128, /* unsigned I64 -> F128 */
Iop_F32toF128, /* F32 -> F128 */
Iop_F64toF128, /* F64 -> F128 */
Iop_F128toI32S, /* IRRoundingMode(I32) x F128 -> signed I32 */
Iop_F128toI64S, /* IRRoundingMode(I32) x F128 -> signed I64 */
+ Iop_F128toI32U, /* IRRoundingMode(I32) x F128 -> unsigned I32 */
+ Iop_F128toI64U, /* IRRoundingMode(I32) x F128 -> unsigned I64 */
Iop_F128toF64, /* IRRoundingMode(I32) x F128 -> F64 */
Iop_F128toF32, /* IRRoundingMode(I32) x F128 -> F32 */