integers. Patch by Maran Pakkirisamy (maranp@linux.vnet.ibm.com).
Part of fixing BZ 307113.
git-svn-id: svn://svn.valgrind.org/vex/trunk@2632
S390_CC_OP_DFP_TDC_128 = 46,
S390_CC_OP_DFP_TDG_32 = 47,
S390_CC_OP_DFP_TDG_64 = 48,
- S390_CC_OP_DFP_TDG_128 = 49
+ S390_CC_OP_DFP_TDG_128 = 49,
+ S390_CC_OP_DFP_64_TO_UINT_32 = 50,
+ S390_CC_OP_DFP_128_TO_UINT_32 = 51,
+ S390_CC_OP_DFP_64_TO_UINT_64 = 52,
+ S390_CC_OP_DFP_128_TO_UINT_64 = 53,
+ S390_CC_OP_DFP_64_TO_INT_32 = 54,
+ S390_CC_OP_DFP_128_TO_INT_32 = 55
};
/*------------------------------------------------------------*/
| S390_CC_OP_DFP_TDG_32 | D value | Z group | |
| S390_CC_OP_DFP_TDG_64 | D value | Z group | |
| S390_CC_OP_DFP_TDG_128 | D value hi 64 bits | D value low 64 bits | Z group |
+ | S390_CC_OP_DFP_64_TO_UINT_32   | D source              | Z rounding mode      |                 |
+ | S390_CC_OP_DFP_128_TO_UINT_32  | D source hi 64 bits   | D source low 64 bits | Z rounding mode |
+ | S390_CC_OP_DFP_64_TO_UINT_64   | D source              | Z rounding mode      |                 |
+ | S390_CC_OP_DFP_128_TO_UINT_64  | D source hi 64 bits   | D source low 64 bits | Z rounding mode |
+ | S390_CC_OP_DFP_64_TO_INT_32    | D source              | Z rounding mode      |                 |
+ | S390_CC_OP_DFP_128_TO_INT_32   | D source hi 64 bits   | D source low 64 bits | Z rounding mode |
+ +--------------------------------+-----------------------+----------------------+-----------------+
*/
psw >> 28; /* cc */ \
})
+/* Convert an IRRoundingModeDFP value to s390_dfp_round_t */
+#if defined(VGA_s390x)
+static s390_dfp_round_t
+decode_dfp_rounding_mode(UInt irrm)
+{
+ switch (irrm) {
+ case Irrm_DFP_NEAREST:
+ return S390_DFP_ROUND_NEAREST_EVEN_4;
+ case Irrm_DFP_NegINF:
+ return S390_DFP_ROUND_NEGINF_7;
+ case Irrm_DFP_PosINF:
+ return S390_DFP_ROUND_POSINF_6;
+ case Irrm_DFP_ZERO:
+ return S390_DFP_ROUND_ZERO_5;
+ case Irrm_DFP_NEAREST_TIE_AWAY_0:
+ return S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1;
+ case Irrm_DFP_PREPARE_SHORTER:
+ return S390_DFP_ROUND_PREPARE_SHORT_3;
+ case Irrm_DFP_AWAY_FROM_ZERO:
+ return S390_DFP_ROUND_AWAY_0;
+ case Irrm_DFP_NEAREST_TIE_TOWARD_0:
+ return S390_DFP_ROUND_NEAREST_TIE_TOWARD_0;
+ }
+ vpanic("decode_dfp_rounding_mode");
+}
+#endif
+
#define S390_CC_FOR_DFP_RESULT(cc_dep1) \
({ \
__asm__ volatile ( \
psw >> 28; /* cc */ \
})
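+/* Execute the "convert to fixed" instruction given by OPCODE on the host,
+   with the DFP operand CC_DEP1 and ROUNDING_MODE pasted into the instruction
+   text as an immediate. The integer result (in r0) is discarded; only the
+   condition code, extracted from the PSW via "ipm", is returned. */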
+#define S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,rounding_mode) \
+ ({ \
+ __asm__ volatile ( \
+ opcode ",0,%[op]," #rounding_mode ",0\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [op] "f"(cc_dep1) \
+ : "cc", "r0"); \
+ psw >> 28; /* cc */ \
+ })
+
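+/* CC_DEP2 holds the IR rounding mode. Decode it and dispatch to the AUX
+   macro with the corresponding literal value, since the rounding mode must
+   appear as an immediate in the assembled instruction. */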
+#define S390_CC_FOR_DFP_CONVERT(opcode,cc_dep1,cc_dep2) \
+ ({ \
+ UInt cc; \
+ switch (decode_dfp_rounding_mode(cc_dep2)) { \
+ case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1: \
+ case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12: \
+ cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,1); \
+ break; \
+ case S390_DFP_ROUND_PREPARE_SHORT_3: \
+ case S390_DFP_ROUND_PREPARE_SHORT_15: \
+ cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,3); \
+ break; \
+ case S390_DFP_ROUND_NEAREST_EVEN_4: \
+ case S390_DFP_ROUND_NEAREST_EVEN_8: \
+ cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,4); \
+ break; \
+ case S390_DFP_ROUND_ZERO_5: \
+ case S390_DFP_ROUND_ZERO_9: \
+ cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,5); \
+ break; \
+ case S390_DFP_ROUND_POSINF_6: \
+ case S390_DFP_ROUND_POSINF_10: \
+ cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,6); \
+ break; \
+ case S390_DFP_ROUND_NEGINF_7: \
+ case S390_DFP_ROUND_NEGINF_11: \
+ cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,7); \
+ break; \
+ case S390_DFP_ROUND_NEAREST_TIE_TOWARD_0: \
+ cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,13); \
+ break; \
+ case S390_DFP_ROUND_AWAY_0: \
+ cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,14); \
+ break; \
+ default: \
+ vpanic("unexpected dfp rounding mode"); \
+ } \
+ cc; \
+ })
+
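+/* Same as S390_CC_FOR_DFP_CONVERT_AUX, but used with the unsigned
+   ("convert to logical") opcodes. */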
+#define S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,rounding_mode) \
+ ({ \
+ __asm__ volatile ( \
+ opcode ",0,%[op]," #rounding_mode ",0\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [op] "f"(cc_dep1) \
+ : "cc", "r0"); \
+ psw >> 28; /* cc */ \
+ })
+
+#define S390_CC_FOR_DFP_UCONVERT(opcode,cc_dep1,cc_dep2) \
+ ({ \
+ UInt cc; \
+ switch (decode_dfp_rounding_mode(cc_dep2)) { \
+ case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1: \
+ case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12: \
+ cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,1); \
+ break; \
+ case S390_DFP_ROUND_PREPARE_SHORT_3: \
+ case S390_DFP_ROUND_PREPARE_SHORT_15: \
+ cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,3); \
+ break; \
+ case S390_DFP_ROUND_NEAREST_EVEN_4: \
+ case S390_DFP_ROUND_NEAREST_EVEN_8: \
+ cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,4); \
+ break; \
+ case S390_DFP_ROUND_ZERO_5: \
+ case S390_DFP_ROUND_ZERO_9: \
+ cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,5); \
+ break; \
+ case S390_DFP_ROUND_POSINF_6: \
+ case S390_DFP_ROUND_POSINF_10: \
+ cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,6); \
+ break; \
+ case S390_DFP_ROUND_NEGINF_7: \
+ case S390_DFP_ROUND_NEGINF_11: \
+ cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,7); \
+ break; \
+ case S390_DFP_ROUND_NEAREST_TIE_TOWARD_0: \
+ cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,13); \
+ break; \
+ case S390_DFP_ROUND_AWAY_0: \
+ cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,14); \
+ break; \
+ default: \
+ vpanic("unexpected dfp rounding mode"); \
+ } \
+ cc; \
+ })
+
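+/* 128-bit variant: the D128 operand is moved into the f4/f6 register pair
+   before the conversion instruction reads it. */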
+#define S390_CC_FOR_DFP128_CONVERT_AUX(opcode,hi,lo,rounding_mode) \
+ ({ \
+ __asm__ volatile ( \
+ "ldr 4,%[high]\n\t" \
+ "ldr 6,%[low]\n\t" \
+ opcode ",0,4," #rounding_mode ",0\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [high] "f"(hi), [low] "f"(lo) \
+ : "cc", "r0", "f4", "f6"); \
+ psw >> 28; /* cc */ \
+ })
+
+#define S390_CC_FOR_DFP128_CONVERT(opcode,cc_dep1,cc_dep2,cc_ndep) \
+ ({ \
+ UInt cc; \
+ /* Recover the original DEP2 value. See comment near \
+ s390_cc_thunk_put3 for rationale. */ \
+ cc_dep2 = cc_dep2 ^ cc_ndep; \
+ switch (decode_dfp_rounding_mode(cc_ndep)) { \
+ case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1: \
+ case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12: \
+ cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,1); \
+ break; \
+ case S390_DFP_ROUND_PREPARE_SHORT_3: \
+ case S390_DFP_ROUND_PREPARE_SHORT_15: \
+ cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,3); \
+ break; \
+ case S390_DFP_ROUND_NEAREST_EVEN_4: \
+ case S390_DFP_ROUND_NEAREST_EVEN_8: \
+ cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,4); \
+ break; \
+ case S390_DFP_ROUND_ZERO_5: \
+ case S390_DFP_ROUND_ZERO_9: \
+ cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,5); \
+ break; \
+ case S390_DFP_ROUND_POSINF_6: \
+ case S390_DFP_ROUND_POSINF_10: \
+ cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,6); \
+ break; \
+ case S390_DFP_ROUND_NEGINF_7: \
+ case S390_DFP_ROUND_NEGINF_11: \
+ cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,7); \
+ break; \
+ case S390_DFP_ROUND_NEAREST_TIE_TOWARD_0: \
+ cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,13); \
+ break; \
+ case S390_DFP_ROUND_AWAY_0: \
+ cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,14); \
+ break; \
+ default: \
+ vpanic("unexpected dfp rounding mode"); \
+ } \
+ cc; \
+ })
+
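+/* Same as S390_CC_FOR_DFP128_CONVERT_AUX, but used with the unsigned
+   ("convert to logical") opcodes. */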
+#define S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,hi,lo,rounding_mode) \
+ ({ \
+ __asm__ volatile ( \
+ "ldr 4,%[high]\n\t" \
+ "ldr 6,%[low]\n\t" \
+ opcode ",0,4," #rounding_mode ",0\n\t" \
+ "ipm %[psw]\n\t" : [psw] "=d"(psw) \
+ : [high] "f"(hi), [low] "f"(lo) \
+ : "cc", "r0", "f4", "f6"); \
+ psw >> 28; /* cc */ \
+ })
+
+#define S390_CC_FOR_DFP128_UCONVERT(opcode,cc_dep1,cc_dep2,cc_ndep) \
+ ({ \
+ UInt cc; \
+ /* Recover the original DEP2 value. See comment near \
+ s390_cc_thunk_put3 for rationale. */ \
+ cc_dep2 = cc_dep2 ^ cc_ndep; \
+ switch (decode_dfp_rounding_mode(cc_ndep)) { \
+ case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1: \
+ case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12: \
+ cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,1); \
+ break; \
+ case S390_DFP_ROUND_PREPARE_SHORT_3: \
+ case S390_DFP_ROUND_PREPARE_SHORT_15: \
+ cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,3); \
+ break; \
+ case S390_DFP_ROUND_NEAREST_EVEN_4: \
+ case S390_DFP_ROUND_NEAREST_EVEN_8: \
+ cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,4); \
+ break; \
+ case S390_DFP_ROUND_ZERO_5: \
+ case S390_DFP_ROUND_ZERO_9: \
+ cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,5); \
+ break; \
+ case S390_DFP_ROUND_POSINF_6: \
+ case S390_DFP_ROUND_POSINF_10: \
+ cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,6); \
+ break; \
+ case S390_DFP_ROUND_NEGINF_7: \
+ case S390_DFP_ROUND_NEGINF_11: \
+ cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,7); \
+ break; \
+ case S390_DFP_ROUND_NEAREST_TIE_TOWARD_0: \
+ cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,13); \
+ break; \
+ case S390_DFP_ROUND_AWAY_0: \
+ cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,14); \
+ break; \
+ default: \
+ vpanic("unexpected dfp rounding mode"); \
+ } \
+ cc; \
+ })
+
/* Return the value of the condition code from the supplied thunk parameters.
This is not the value of the PSW. It is the value of the 2 CC bits within
return S390_CC_FOR_DFP128_TD(".insn rxe, 0xed0000000059", cc_dep1,
cc_dep2, cc_ndep);
+ case S390_CC_OP_DFP_64_TO_INT_32: /* CFDTR */
+ return S390_CC_FOR_DFP_CONVERT(".insn rrf,0xb9410000", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_DFP_128_TO_INT_32: /* CFXTR */
+ return S390_CC_FOR_DFP128_CONVERT(".insn rrf,0xb9490000", cc_dep1,
+ cc_dep2, cc_ndep);
+
+ case S390_CC_OP_DFP_64_TO_UINT_32: /* CLFDTR */
+ return S390_CC_FOR_DFP_UCONVERT(".insn rrf,0xb9430000", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_DFP_128_TO_UINT_32: /* CLFXTR */
+ return S390_CC_FOR_DFP128_UCONVERT(".insn rrf,0xb94b0000", cc_dep1,
+ cc_dep2, cc_ndep);
+
+ case S390_CC_OP_DFP_64_TO_UINT_64: /* CLGDTR */
+ return S390_CC_FOR_DFP_UCONVERT(".insn rrf,0xb9420000", cc_dep1, cc_dep2);
+
+ case S390_CC_OP_DFP_128_TO_UINT_64: /* CLGXTR */
+ return S390_CC_FOR_DFP128_UCONVERT(".insn rrf,0xb94a0000", cc_dep1,
+ cc_dep2, cc_ndep);
+
default:
break;
}
return "cxtr";
}
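+/* IR generators for the DFP <-> integer conversions. These instructions
+   require the floating point extension facility. */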
+static const HChar *
+s390_irgen_CDFTR(UChar m3 __attribute__((unused)),
+ UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ put_dpr_dw0(r1, unop(Iop_I32StoD64, mkexpr(op2)));
+ }
+ return "cdftr";
+}
+
+static const HChar *
+s390_irgen_CXFTR(UChar m3 __attribute__((unused)),
+ UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ put_dpr_pair(r1, unop(Iop_I32StoD128, mkexpr(op2)));
+ }
+ return "cxftr";
+}
+
+static const HChar *
+s390_irgen_CDLFTR(UChar m3 __attribute__((unused)),
+ UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ put_dpr_dw0(r1, unop(Iop_I32UtoD64, mkexpr(op2)));
+ }
+ return "cdlftr";
+}
+
+static const HChar *
+s390_irgen_CXLFTR(UChar m3 __attribute__((unused)),
+ UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op2 = newTemp(Ity_I32);
+
+ assign(op2, get_gpr_w1(r2));
+ put_dpr_pair(r1, unop(Iop_I32UtoD128, mkexpr(op2)));
+ }
+ return "cxlftr";
+}
+
+static const HChar *
+s390_irgen_CDLGTR(UChar m3, UChar m4 __attribute__((unused)),
+ UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ put_dpr_dw0(r1, binop(Iop_I64UtoD64,
+ mkexpr(encode_dfp_rounding_mode(m3)),
+ mkexpr(op2)));
+ }
+ return "cdlgtr";
+}
+
+static const HChar *
+s390_irgen_CXLGTR(UChar m3 __attribute__((unused)),
+ UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op2 = newTemp(Ity_I64);
+
+ assign(op2, get_gpr_dw0(r2));
+ put_dpr_pair(r1, unop(Iop_I64UtoD128, mkexpr(op2)));
+ }
+ return "cxlgtr";
+}
+
+static const HChar *
+s390_irgen_CFDTR(UChar m3, UChar m4 __attribute__((unused)),
+ UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op = newTemp(Ity_D64);
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+ assign(op, get_dpr_dw0(r2));
+ assign(result, binop(Iop_D64toI32S, mkexpr(rounding_mode),
+ mkexpr(op)));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putFZ(S390_CC_OP_DFP_64_TO_INT_32, op, rounding_mode);
+ }
+ return "cfdtr";
+}
+
+static const HChar *
+s390_irgen_CFXTR(UChar m3, UChar m4 __attribute__((unused)),
+ UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op = newTemp(Ity_D128);
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+ assign(op, get_dpr_pair(r2));
+ assign(result, binop(Iop_D128toI32S, mkexpr(rounding_mode),
+ mkexpr(op)));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_put1d128Z(S390_CC_OP_DFP_128_TO_INT_32, op, rounding_mode);
+ }
+ return "cfxtr";
+}
+
static const HChar *
s390_irgen_CEDTR(UChar r1, UChar r2)
{
return "cextr";
}
+static const HChar *
+s390_irgen_CLFDTR(UChar m3, UChar m4 __attribute__((unused)),
+ UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op = newTemp(Ity_D64);
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+ assign(op, get_dpr_dw0(r2));
+ assign(result, binop(Iop_D64toI32U, mkexpr(rounding_mode),
+ mkexpr(op)));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_putFZ(S390_CC_OP_DFP_64_TO_UINT_32, op, rounding_mode);
+ }
+ return "clfdtr";
+}
+
+static const HChar *
+s390_irgen_CLFXTR(UChar m3, UChar m4 __attribute__((unused)),
+ UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op = newTemp(Ity_D128);
+ IRTemp result = newTemp(Ity_I32);
+ IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+ assign(op, get_dpr_pair(r2));
+ assign(result, binop(Iop_D128toI32U, mkexpr(rounding_mode),
+ mkexpr(op)));
+ put_gpr_w1(r1, mkexpr(result));
+ s390_cc_thunk_put1d128Z(S390_CC_OP_DFP_128_TO_UINT_32, op, rounding_mode);
+ }
+ return "clfxtr";
+}
+
+static const HChar *
+s390_irgen_CLGDTR(UChar m3, UChar m4 __attribute__((unused)),
+ UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op = newTemp(Ity_D64);
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+ assign(op, get_dpr_dw0(r2));
+ assign(result, binop(Iop_D64toI64U, mkexpr(rounding_mode),
+ mkexpr(op)));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_putFZ(S390_CC_OP_DFP_64_TO_UINT_64, op, rounding_mode);
+ }
+ return "clgdtr";
+}
+
+static const HChar *
+s390_irgen_CLGXTR(UChar m3, UChar m4 __attribute__((unused)),
+ UChar r1, UChar r2)
+{
+ vassert(s390_host_has_dfp);
+
+ if (! s390_host_has_fpext) {
+ emulation_failure(EmFail_S390X_fpext);
+ } else {
+ IRTemp op = newTemp(Ity_D128);
+ IRTemp result = newTemp(Ity_I64);
+ IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+ assign(op, get_dpr_pair(r2));
+ assign(result, binop(Iop_D128toI64U, mkexpr(rounding_mode),
+ mkexpr(op)));
+ put_gpr_dw0(r1, mkexpr(result));
+ s390_cc_thunk_put1d128Z(S390_CC_OP_DFP_128_TO_UINT_64, op,
+ rounding_mode);
+ }
+ return "clgxtr";
+}
+
static const HChar *
s390_irgen_DDTRA(UChar r3, UChar m4, UChar r1, UChar r2)
{
ovl.fmt.RRE.r2); goto ok;
case 0xb93e: /* KIMD */ goto unimplemented;
case 0xb93f: /* KLMD */ goto unimplemented;
- case 0xb941: /* CFDTR */ goto unimplemented;
- case 0xb942: /* CLGDTR */ goto unimplemented;
- case 0xb943: /* CLFDTR */ goto unimplemented;
+ case 0xb941: s390_format_RRF_UURF(s390_irgen_CFDTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb942: s390_format_RRF_UURF(s390_irgen_CLGDTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb943: s390_format_RRF_UURF(s390_irgen_CLFDTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
case 0xb946: s390_format_RRE_RR(s390_irgen_BCTGR, ovl.fmt.RRE.r1,
ovl.fmt.RRE.r2); goto ok;
- case 0xb949: /* CFXTR */ goto unimplemented;
- case 0xb94a: /* CLGXTR */ goto unimplemented;
- case 0xb94b: /* CLFXTR */ goto unimplemented;
- case 0xb951: /* CDFTR */ goto unimplemented;
- case 0xb952: /* CDLGTR */ goto unimplemented;
- case 0xb953: /* CDLFTR */ goto unimplemented;
- case 0xb959: /* CXFTR */ goto unimplemented;
- case 0xb95a: /* CXLGTR */ goto unimplemented;
- case 0xb95b: /* CXLFTR */ goto unimplemented;
+ case 0xb949: s390_format_RRF_UURF(s390_irgen_CFXTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb94a: s390_format_RRF_UURF(s390_irgen_CLGXTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb94b: s390_format_RRF_UURF(s390_irgen_CLFXTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb951: s390_format_RRF_UUFR(s390_irgen_CDFTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb952: s390_format_RRF_UUFR(s390_irgen_CDLGTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb953: s390_format_RRF_UUFR(s390_irgen_CDLFTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb959: s390_format_RRF_UUFR(s390_irgen_CXFTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb95a: s390_format_RRF_UUFR(s390_irgen_CXLGTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
+ case 0xb95b: s390_format_RRF_UUFR(s390_irgen_CXLFTR, ovl.fmt.RRF2.m3,
+ ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+ ovl.fmt.RRF2.r2); goto ok;
case 0xb960: /* CGRT */ goto unimplemented;
case 0xb961: /* CLGRT */ goto unimplemented;
case 0xb972: /* CRT */ goto unimplemented;
}
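+/* Emitters for "convert from fixed/logical": integer source in GPR r2,
+   DFP result in FPR (pair) r1. */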
+static UChar *
+s390_emit_CDFTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+ s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdftr", r1, m3, r2, m4);
+ }
+
+ return emit_RRF2(p, 0xb9510000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXFTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+ s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxftr", r1, m3, r2, m4);
+ }
+
+ return emit_RRF2(p, 0xb9590000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDLFTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdlftr", r1, m3, r2, m4);
+
+ return emit_RRF2(p, 0xb9530000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXLFTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxlftr", r1, m3, r2, m4);
+
+ return emit_RRF2(p, 0xb95b0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDLGTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdlgtr", r1, m3, r2, m4);
+
+ return emit_RRF2(p, 0xb9520000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXLGTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxlgtr", r1, m3, r2, m4);
+
+ return emit_RRF2(p, 0xb95a0000, m3, m4, r1, r2);
+}
+
+
static UChar *
s390_emit_CEDTR(UChar *p, UChar r1, UChar r2)
{
}
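+/* Emitters for "convert to fixed/logical": DFP source in FPR (pair) r2,
+   integer result in GPR r1. */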
+static UChar *
+s390_emit_CFDTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+ s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "cfdtr", r1, m3, r2, m4);
+ }
+
+ return emit_RRF2(p, 0xb9410000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CFXTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+ s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "cfxtr", r1, m3, r2, m4);
+ }
+
+ return emit_RRF2(p, 0xb9490000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLFDTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfdtr", r1, m3, r2, m4);
+
+ return emit_RRF2(p, 0xb9430000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLFXTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfxtr", r1, m3, r2, m4);
+
+ return emit_RRF2(p, 0xb94b0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLGDTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgdtr", r1, m3, r2, m4);
+
+ return emit_RRF2(p, 0xb9420000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLGXTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+ vassert(m4 == 0);
+ vassert(s390_host_has_dfp);
+ vassert(s390_host_has_fpext);
+
+ if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+ s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgxtr", r1, m3, r2, m4);
+
+ return emit_RRF2(p, 0xb94a0000, m3, m4, r1, r2);
+}
+
+
static UChar *
s390_emit_DDTRA(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
{
case S390_DFP_D64_TO_D32:
case S390_DFP_D64_TO_D128:
case S390_DFP_D128_TO_D64: op = "v-d2d"; break;
+ case S390_DFP_I32_TO_D64:
+ case S390_DFP_I32_TO_D128: op = "v-i2d"; break;
+ case S390_DFP_U32_TO_D64:
+ case S390_DFP_U32_TO_D128:
+ case S390_DFP_U64_TO_D64:
+ case S390_DFP_U64_TO_D128: op = "v-u2d"; break;
+ case S390_DFP_D64_TO_I32:
+ case S390_DFP_D128_TO_I32: op = "v-d2i"; break;
+ case S390_DFP_D64_TO_U32:
+ case S390_DFP_D64_TO_U64:
+ case S390_DFP_D128_TO_U32:
+ case S390_DFP_D128_TO_U64: op = "v-d2u"; break;
default: goto fail;
}
s390_sprintf(buf, "%M %R,%R", op, insn->variant.dfp_convert.dst_hi,
case S390_INSN_DFP_CONVERT:
switch (insn->variant.dfp_convert.tag) {
- case S390_DFP_D32_TO_D64: p += vex_sprintf(p, "4 -> "); goto common;
+ case S390_DFP_D32_TO_D64:
+ case S390_DFP_I32_TO_D64:
+ case S390_DFP_I32_TO_D128:
+ case S390_DFP_U32_TO_D64:
+ case S390_DFP_U32_TO_D128: p += vex_sprintf(p, "4 -> "); goto common;
case S390_DFP_D64_TO_D32:
- case S390_DFP_D64_TO_D128:p += vex_sprintf(p, "8 -> "); goto common;
- case S390_DFP_D128_TO_D64:p += vex_sprintf(p, "16 -> "); goto common;
+ case S390_DFP_D64_TO_D128:
+ case S390_DFP_U64_TO_D64:
+ case S390_DFP_U64_TO_D128:
+ case S390_DFP_D64_TO_I32:
+ case S390_DFP_D64_TO_U32:
+ case S390_DFP_D64_TO_U64: p += vex_sprintf(p, "8 -> "); goto common;
+ case S390_DFP_D128_TO_D64:
+ case S390_DFP_D128_TO_I32:
+ case S390_DFP_D128_TO_U32:
+ case S390_DFP_D128_TO_U64: p += vex_sprintf(p, "16 -> "); goto common;
default:
goto common;
}
switch (insn->variant.dfp_convert.tag) {
+ /* Convert to fixed */
+ case S390_DFP_D64_TO_I32: return s390_emit_CFDTR(buf, m3, m4, r1, r2);
+ case S390_DFP_D128_TO_I32: return s390_emit_CFXTR(buf, m3, m4, r1, r2);
+
+ /* Convert to logical */
+ case S390_DFP_D64_TO_U32: return s390_emit_CLFDTR(buf, m3, m4, r1, r2);
+ case S390_DFP_D128_TO_U32: return s390_emit_CLFXTR(buf, m3, m4, r1, r2);
+ case S390_DFP_D64_TO_U64: return s390_emit_CLGDTR(buf, m3, m4, r1, r2);
+ case S390_DFP_D128_TO_U64: return s390_emit_CLGXTR(buf, m3, m4, r1, r2);
+
+ /* Convert from fixed */
+ case S390_DFP_I32_TO_D64: return s390_emit_CDFTR(buf, 0, m4, r1, r2);
+ case S390_DFP_I32_TO_D128: return s390_emit_CXFTR(buf, 0, m4, r1, r2);
+
+ /* Convert from logical */
+ case S390_DFP_U32_TO_D64: return s390_emit_CDLFTR(buf, m3, m4, r1, r2);
+ case S390_DFP_U64_TO_D64: return s390_emit_CDLGTR(buf, m3, m4, r1, r2);
+ case S390_DFP_U32_TO_D128: return s390_emit_CXLFTR(buf, m3, m4, r1, r2);
+ case S390_DFP_U64_TO_D128: return s390_emit_CXLGTR(buf, m3, m4, r1, r2);
+
/* Load lengthened */
case S390_DFP_D32_TO_D64: return s390_emit_LDETR(buf, m4, r1, r2);
case S390_DFP_D64_TO_D128: return s390_emit_LXDTR(buf, m4, r1, r2);
S390_DFP_D32_TO_D64,
S390_DFP_D64_TO_D32,
S390_DFP_D64_TO_D128,
- S390_DFP_D128_TO_D64
+ S390_DFP_D128_TO_D64,
+ S390_DFP_I32_TO_D64,
+ S390_DFP_I32_TO_D128,
+ S390_DFP_U32_TO_D64,
+ S390_DFP_U32_TO_D128,
+ S390_DFP_U64_TO_D64,
+ S390_DFP_U64_TO_D128,
+ S390_DFP_D64_TO_I32,
+ S390_DFP_D64_TO_U32,
+ S390_DFP_D64_TO_U64,
+ S390_DFP_D128_TO_I32,
+ S390_DFP_D128_TO_U32,
+ S390_DFP_D128_TO_U64
} s390_dfp_conv_t;
/* The kind of binary DFP operations */
case Iop_F128toI64S: conv = S390_BFP_F128_TO_I64; goto do_convert_128;
case Iop_F128toI32U: conv = S390_BFP_F128_TO_U32; goto do_convert_128;
case Iop_F128toI64U: conv = S390_BFP_F128_TO_U64; goto do_convert_128;
+ case Iop_D64toI32S: conv = S390_DFP_D64_TO_I32; goto do_convert_dfp;
+ case Iop_D64toI32U: conv = S390_DFP_D64_TO_U32; goto do_convert_dfp;
+ case Iop_D64toI64U: conv = S390_DFP_D64_TO_U64; goto do_convert_dfp;
+ case Iop_D128toI32S: conv = S390_DFP_D128_TO_I32; goto do_convert_dfp128;
+ case Iop_D128toI32U: conv = S390_DFP_D128_TO_U32; goto do_convert_dfp128;
+ case Iop_D128toI64U: conv = S390_DFP_D128_TO_U64; goto do_convert_dfp128;
do_convert: {
s390_bfp_round_t rounding_mode;
return res;
}
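+ /* D64 -> integer conversions: operand is a DFP value, result is a GPR,
+    and the rounding mode comes from the first argument of the binop. */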
+ do_convert_dfp: {
+ s390_dfp_round_t rounding_mode;
+
+ res = newVRegI(env);
+ h1 = s390_isel_dfp_expr(env, arg2); /* Process operand */
+
+ rounding_mode = get_dfp_rounding_mode(env, arg1);
+ addInstr(env, s390_insn_dfp_convert(size, conv, res, h1,
+ rounding_mode));
+ return res;
+ }
+
+ do_convert_dfp128: {
+ s390_dfp_round_t rounding_mode;
+ HReg op_hi, op_lo, f13, f15;
+
+ res = newVRegI(env);
+ s390_isel_dfp128_expr(&op_hi, &op_lo, env, arg2); /* operand */
+
+ /* We use the non-virtual floating point registers f13 and f15 as a pair */
+ f13 = make_fpr(13);
+ f15 = make_fpr(15);
+
+ /* operand --> (f13, f15) */
+ addInstr(env, s390_insn_move(8, f13, op_hi));
+ addInstr(env, s390_insn_move(8, f15, op_lo));
+
+ rounding_mode = get_dfp_rounding_mode(env, arg1);
+ addInstr(env, s390_insn_dfp128_convert_from(size, conv, res, f13,
+ f15, rounding_mode));
+ return res;
+ }
+
case Iop_8HLto16:
case Iop_16HLto32:
case Iop_32HLto64: {
switch (expr->Iex.Unop.op) {
case Iop_D64toD128: conv = S390_DFP_D64_TO_D128; goto convert_dfp;
+ case Iop_I32StoD128: conv = S390_DFP_I32_TO_D128; goto convert_int;
+ case Iop_I32UtoD128: conv = S390_DFP_U32_TO_D128; goto convert_int;
+ case Iop_I64UtoD128: conv = S390_DFP_U64_TO_D128; goto convert_int;
default:
goto irreducible;
}
addInstr(env, s390_insn_dfp128_convert_to(16, conv, f12, f14, op));
goto move_dst;
+ convert_int:
+ op = s390_isel_int_expr(env, left);
+ addInstr(env, s390_insn_dfp128_convert_to(16, conv, f12, f14, op));
+ goto move_dst;
+
move_dst:
/* Move result to virtual destination registers */
*dst_hi = newVRegF(env);
switch (op) {
case Iop_D64toD32: conv = S390_DFP_D64_TO_D32; goto convert_dfp;
+ case Iop_I64UtoD64: conv = S390_DFP_U64_TO_D64; goto convert_int;
convert_dfp:
h1 = s390_isel_dfp_expr(env, left);
goto convert;
+ convert_int:
+ h1 = s390_isel_int_expr(env, left);
+ goto convert;
+
convert: {
s390_dfp_round_t rounding_mode;
/* convert-from-fixed and load-rounded have a rounding mode field
switch (op) {
case Iop_D32toD64: conv = S390_DFP_D32_TO_D64; goto convert_dfp1;
+ case Iop_I32StoD64: conv = S390_DFP_I32_TO_D64; goto convert_int1;
+ case Iop_I32UtoD64: conv = S390_DFP_U32_TO_D64; goto convert_int1;
convert_dfp1:
h1 = s390_isel_dfp_expr(env, left);
goto convert1;
+ convert_int1:
+ h1 = s390_isel_int_expr(env, left);
+ goto convert1;
+
convert1:
dst = newVRegF(env);
/* No rounding mode is needed for these conversions. Just stick