UInt s390_do_cvb(ULong decimal);
ULong s390_do_cvd(ULong binary);
ULong s390_do_ecag(ULong op2addr);
+UInt s390_do_pfpo(UInt gpr0);
/* The various ways to compute the condition code. */
enum {
S390_CC_OP_DFP_64_TO_INT_32 = 54,
S390_CC_OP_DFP_128_TO_INT_32 = 55,
S390_CC_OP_DFP_64_TO_INT_64 = 56,
- S390_CC_OP_DFP_128_TO_INT_64 = 57
+ S390_CC_OP_DFP_128_TO_INT_64 = 57,
+ S390_CC_OP_PFPO_64 = 58,
+ S390_CC_OP_PFPO_128 = 59
};
/*------------------------------------------------------------*/
| S390_CC_OP_DFP_128_TO_INT_32 | D source hi 64 bits | D source low 64 bits | Z rounding mode |
| S390_CC_OP_DFP_64_TO_INT_64 | D source | Z rounding mode | |
| S390_CC_OP_DFP_128_TO_INT_64 | D source hi 64 bits | D source low 64 bits | Z rounding mode |
+ | S390_CC_OP_PFPO_64 | F|D source | Z GR0 low 32 bits | |
+ | S390_CC_OP_PFPO_128 | F|D source hi 64 bits | F|D src low 64 bits | Z GR0 low 32 bits |
+--------------------------------+-----------------------+----------------------+-----------------+
*/
ULong s390_do_ecag(ULong op2addr) { return 0; }
#endif
+/*------------------------------------------------------------*/
+/*--- Clean helper for "Perform Floating Point Operation". ---*/
+/*------------------------------------------------------------*/
+#if defined(VGA_s390x)
+/* Clean helper: validate the GR 0 contents for PFPO. Returns
+   EmNote_NONE when the rounding mode and function code are acceptable,
+   otherwise the matching EmFail_S390X_* note. */
+UInt
+s390_do_pfpo(UInt gpr0)
+{
+   UChar rm;
+   UChar op1_ty, op2_ty;
+
+   /* GR 0 bits [60:63] hold the rounding mode; 0, 1 and 8..15 are
+      valid, 2..7 are invalid. */
+   rm = gpr0 & 0xf;
+   if (rm > 1 && rm < 8)
+      return EmFail_S390X_invalid_PFPO_rounding_mode;
+
+   op1_ty = (gpr0 >> 16) & 0xff; // gpr0[40:47]
+   op2_ty = (gpr0 >> 8) & 0xff;  // gpr0[48:55]
+   /* Operand type must be BFP 32, 64, 128 or DFP 32, 64, 128
+      which correspond to 0x5, 0x6, 0x7, 0x8, 0x9, 0xa respectively.
+      Converting a type to itself is not a defined function either.
+      Any other operand type value is unsupported */
+   if ((op1_ty == op2_ty) ||
+       (op1_ty < 0x5 || op1_ty > 0xa) ||
+       (op2_ty < 0x5 || op2_ty > 0xa))
+      return EmFail_S390X_invalid_PFPO_function;
+
+   return EmNote_NONE;
+}
+#else
+UInt s390_do_pfpo(UInt gpr0) { return 0; }
+#endif
+
/*------------------------------------------------------------*/
/*--- Helper for condition code. ---*/
/*------------------------------------------------------------*/
return S390_CC_FOR_DFP128_UCONVERT(".insn rrf,0xb94a0000", cc_dep1,
cc_dep2, cc_ndep);
+      case S390_CC_OP_PFPO_64: {
+         /* Re-execute PFPO on the host to obtain the condition code.
+            cc_dep1 = 64-bit source operand (FPR 4), cc_dep2 = GR 0
+            (function code + rounding mode).
+            Fixes: the constraint "1f" was invalid (a digit constraint
+            matches an *output* operand; use plain "f"), and the clobber
+            list missed f0/f2 (PFPO's result registers) and f6. */
+         __asm__ volatile(
+              "ldr 4,%[cc_dep1]\n\t"      /* 64 bit FPR move */
+              "lr 0,%[cc_dep2]\n\t"       /* 32 bit register move */
+              ".short 0x010a\n\t"         /* PFPO */
+              "ipm %[psw]\n\t"            : [psw] "=d"(psw)
+                                          : [cc_dep1] "f"(cc_dep1),
+                                            [cc_dep2] "d"(cc_dep2)
+                                          : "r0", "r1", "f0", "f2", "f4", "f6");
+         return psw >> 28;   /* cc */
+      }
+
+      case S390_CC_OP_PFPO_128: {
+         /* Re-execute PFPO on the host to obtain the condition code.
+            cc_dep1/cc_dep2 = 128-bit source operand (FPR pair 4,6),
+            cc_ndep = GR 0 (function code + rounding mode). PFPO writes
+            its result to FPR pair 0,2 and a return code to GR 1, hence
+            the clobbers. */
+         __asm__ volatile(
+              "ldr 4,%[cc_dep1]\n\t"
+              "ldr 6,%[cc_dep2]\n\t"
+              "lr 0,%[cc_ndep]\n\t"       /* 32 bit register move */
+              ".short 0x010a\n\t"         /* PFPO */
+              "ipm %[psw]\n\t"            : [psw] "=d"(psw)
+                                          : [cc_dep1] "f"(cc_dep1),
+                                            [cc_dep2] "f"(cc_dep2),
+                                            [cc_ndep] "d"(cc_ndep)
+                                          : "r0", "r1", "f0", "f2", "f4", "f6");
+         return psw >> 28;   /* cc */
+      }
+
default:
break;
}
s390_disasm(ENC2(MNM, UINT), mnm, i);
}
+/* Handle insns of format E (no operands): run the IR generator and,
+   when front-end tracing is enabled, print the mnemonic. */
+static void
+s390_format_E(const HChar *(*irgen)(void))
+{
+   const HChar *mnemonic = irgen();
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE)) {
+      s390_disasm(ENC1(MNM), mnemonic);
+   }
+}
+
static void
s390_format_RI(const HChar *(*irgen)(UChar r1, UShort i2),
UChar r1, UShort i2)
return "pfdrl";
}
+/* Extract the rounding mode from GR 0 bits [60:63] and translate it
+   into the IR rounding-mode expression used by the conversion ops.
+   1 selects "round per FPC" (BFP); 8..15 map to the DFP rounding
+   modes; 0 and the invalid values 2..7 fall back to
+   S390_DFP_ROUND_PER_FPC_0 (the invalid ones are rejected separately
+   by s390_do_pfpo). */
+static IRExpr *
+get_rounding_mode_from_gr0(void)
+{
+   IRTemp rm_bits = newTemp(Ity_I32);
+   IRExpr *s390rm;
+   IRExpr *irrm;
+
+   vassert(s390_host_has_pfpo);
+   /* The dfp/bfp rounding mode is stored in bits [60:63] of GR 0
+      when PFPO insn is called. So, extract the bits at [60:63] */
+   assign(rm_bits, binop(Iop_And32, get_gpr_w1(0), mkU32(0xf)));
+   s390rm = mkexpr(rm_bits);
+   irrm = mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0x1)),
+          mkexpr(encode_bfp_rounding_mode( S390_BFP_ROUND_PER_FPC)),
+          mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0x8)),
+            mkexpr(encode_dfp_rounding_mode(S390_DFP_ROUND_NEAREST_EVEN_8)),
+            mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0x9)),
+              mkexpr(encode_dfp_rounding_mode(S390_DFP_ROUND_ZERO_9)),
+              mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xa)),
+                mkexpr(encode_dfp_rounding_mode(S390_DFP_ROUND_POSINF_10)),
+                mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xb)),
+                  mkexpr(encode_dfp_rounding_mode(S390_DFP_ROUND_NEGINF_11)),
+                  mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xc)),
+                    mkexpr(encode_dfp_rounding_mode(
+                             S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12)),
+                    mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xd)),
+                      mkexpr(encode_dfp_rounding_mode(
+                               S390_DFP_ROUND_NEAREST_TIE_TOWARD_0)),
+                      mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xe)),
+                        mkexpr(encode_dfp_rounding_mode(
+                                 S390_DFP_ROUND_AWAY_0)),
+                        mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xf)),
+                          mkexpr(encode_dfp_rounding_mode(
+                                   S390_DFP_ROUND_PREPARE_SHORT_15)),
+                          /* if rounding mode is 0 or invalid (2-7)
+                             set S390_DFP_ROUND_PER_FPC_0 */
+                          mkexpr(encode_dfp_rounding_mode(
+                                   S390_DFP_ROUND_PER_FPC_0)))))))))));
+
+   return irrm;
+}
+
+/* Build a clean-helper call to s390_do_pfpo, which validates the
+   function code and rounding mode held in GR 0. Yields an Ity_I32
+   VexEmNote value. */
+static IRExpr *
+s390_call_pfpo_helper(IRExpr *gr0)
+{
+   IRExpr **args = mkIRExprVec_1(gr0);
+   IRExpr *call  = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+                                 "s390_do_pfpo", &s390_do_pfpo, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
+/* Translate PFPO. GR 0 selects the operation: bit 32 is the
+   test-validity bit, bits [33:55] the function code, bits [60:63] the
+   rounding mode. The source lives in FPR 4 (pair 4,6 for 128 bit),
+   the result goes to FPR 0 (pair 0,2), and GR 1 receives the return
+   code. Each supported function code gets its own guarded conversion
+   sequence; unsupported codes abort with an emulation failure. */
+static const HChar *
+s390_irgen_PFPO(void)
+{
+   IRTemp gr0 = newTemp(Ity_I32);       /* word 1 [32:63] of GR 0 */
+   IRTemp test_bit = newTemp(Ity_I32);  /* bit 32 of GR 0 - test validity */
+   IRTemp fn = newTemp(Ity_I32);        /* [33:55] of GR 0 - function code */
+   IRTemp ef = newTemp(Ity_I32);        /* Emulation Failure */
+   IRTemp src1 = newTemp(Ity_F64);
+   IRTemp dst1 = newTemp(Ity_D64);
+   IRTemp src2 = newTemp(Ity_D64);
+   IRTemp dst2 = newTemp(Ity_F64);
+   IRTemp src3 = newTemp(Ity_F64);
+   IRTemp dst3 = newTemp(Ity_D128);
+   IRTemp src4 = newTemp(Ity_D128);
+   IRTemp dst4 = newTemp(Ity_F64);
+   IRTemp src5 = newTemp(Ity_F128);
+   IRTemp dst5 = newTemp(Ity_D128);
+   IRTemp src6 = newTemp(Ity_D128);
+   IRTemp dst6 = newTemp(Ity_F128);
+   IRExpr *irrm;
+
+   vassert(s390_host_has_pfpo);
+
+   assign(gr0, get_gpr_w1(0));
+   /* get function code */
+   assign(fn, binop(Iop_And32, binop(Iop_Shr32, mkexpr(gr0), mkU8(8)),
+                    mkU32(0x7fffff)));
+   /* get validity test bit */
+   assign(test_bit, binop(Iop_And32, binop(Iop_Shr32, mkexpr(gr0), mkU8(31)),
+                          mkU32(0x1)));
+   irrm = get_rounding_mode_from_gr0();
+
+   /* test_bit is 1: only report (via cc) whether the function code is
+      valid; perform no conversion. */
+   assign(src1, get_fpr_dw0(4)); /* get source from FPR 4,6 */
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src1, gr0);
+
+   /* Return code set in GR1 is usually 0. Non-zero value is set only
+      when exceptions are raised. See Programming Notes point 5 in the
+      instruction description of pfpo in POP. Since valgrind does not
+      model exceptions, it might be safe to just set 0 to GR 1. */
+   put_gpr_w1(1, mkU32(0x0));
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(test_bit), mkU32(0x1)));
+
+   /* Check validity of function code in GR 0 */
+   assign(ef, s390_call_pfpo_helper(unop(Iop_32Uto64, mkexpr(gr0))));
+
+   /* fixs390: Function emulation_failure can be used if it takes argument as
+      IRExpr * instead of VexEmNote. */
+   stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_EMNOTE), mkexpr(ef)));
+   dis_res->whatNext = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_EmFail;
+
+   /* Abort the translation if the helper flagged GR 0 as invalid. */
+   stmt(
+        IRStmt_Exit(
+                    binop(Iop_CmpNE32, mkexpr(ef), mkU32(EmNote_NONE)),
+                    Ijk_EmFail,
+                    IRConst_U64(guest_IA_next_instr),
+                    S390X_GUEST_OFFSET(guest_IA)
+                    )
+        );
+
+   /* F64 -> D64 */
+   /* get source from FPR 4,6 - already set in src1 */
+   assign(dst1, binop(Iop_F64toD64, irrm, mkexpr(src1)));
+   put_dpr_dw0(0, mkexpr(dst1)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src1, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F64_TO_D64)));
+
+   /* D64 -> F64 */
+   assign(src2, get_dpr_dw0(4)); /* get source from FPR 4,6 */
+   assign(dst2, binop(Iop_D64toF64, irrm, mkexpr(src2)));
+   put_fpr_dw0(0, mkexpr(dst2)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src2, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D64_TO_F64)));
+
+   /* F64 -> D128 */
+   assign(src3, get_fpr_dw0(4)); /* get source from FPR 4,6 */
+   assign(dst3, binop(Iop_F64toD128, irrm, mkexpr(src3)));
+   put_dpr_pair(0, mkexpr(dst3)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src3, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F64_TO_D128)));
+
+   /* D128 -> F64 */
+   assign(src4, get_dpr_pair(4)); /* get source from FPR 4,6 */
+   assign(dst4, binop(Iop_D128toF64, irrm, mkexpr(src4)));
+   put_fpr_dw0(0, mkexpr(dst4)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_put1d128Z(S390_CC_OP_PFPO_128, src4, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D128_TO_F64)));
+
+   /* F128 -> D128 */
+   assign(src5, get_fpr_pair(4)); /* get source from FPR 4,6 */
+   assign(dst5, binop(Iop_F128toD128, irrm, mkexpr(src5)));
+   put_dpr_pair(0, mkexpr(dst5)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_put1f128Z(S390_CC_OP_PFPO_128, src5, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F128_TO_D128)));
+
+   /* D128 -> F128 */
+   assign(src6, get_dpr_pair(4)); /* get source from FPR 4,6 */
+   assign(dst6, binop(Iop_D128toF128, irrm, mkexpr(src6)));
+   put_fpr_pair(0, mkexpr(dst6)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_put1d128Z(S390_CC_OP_PFPO_128, src6, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D128_TO_F128)));
+
+   return "pfpo";
+}
+
static const HChar *
s390_irgen_RLL(UChar r1, UChar r3, IRTemp op2addr)
{
case 0x0102: /* UPT */ goto unimplemented;
case 0x0104: /* PTFF */ goto unimplemented;
case 0x0107: /* SCKPF */ goto unimplemented;
- case 0x010a: /* PFPO */ goto unimplemented;
+ case 0x010a: s390_format_E(s390_irgen_PFPO); goto ok;
case 0x010b: /* TAM */ goto unimplemented;
case 0x010c: /* SAM24 */ goto unimplemented;
case 0x010d: /* SAM31 */ goto unimplemented;
}
break;
+   case S390_INSN_FP_CONVERT: {
+      s390_fp_convert *fp_convert = insn->variant.fp_convert.details;
+
+      /* Destination (pair) is written, operand (pair) is read; the
+         low halves are valid only for 128-bit conversions. */
+      addHRegUse(u, HRmWrite, fp_convert->dst_hi);
+      if (! hregIsInvalid(fp_convert->dst_lo))
+         addHRegUse(u, HRmWrite, fp_convert->dst_lo);
+      addHRegUse(u, HRmRead, fp_convert->op_hi);
+      if (! hregIsInvalid(fp_convert->op_lo))
+         addHRegUse(u, HRmRead, fp_convert->op_lo);
+      /* GPR 1 is clobbered by PFPO (return code). */
+      addHRegUse(u, HRmWrite, fp_convert->r1);
+      break;
+   }
+
case S390_INSN_MIMM:
s390_amode_get_reg_usage(u, insn->variant.mimm.dst);
break;
}
break;
+   case S390_INSN_FP_CONVERT: {
+      s390_fp_convert *fp_convert = insn->variant.fp_convert.details;
+
+      /* Remap every valid register; low halves may be INVALID_HREG
+         for non-128-bit conversions. */
+      fp_convert->dst_hi = lookupHRegRemap(m, fp_convert->dst_hi);
+      if (! hregIsInvalid(fp_convert->dst_lo))
+         fp_convert->dst_lo = lookupHRegRemap(m, fp_convert->dst_lo);
+      fp_convert->op_hi = lookupHRegRemap(m, fp_convert->op_hi);
+      if (! hregIsInvalid(fp_convert->op_lo))
+         fp_convert->op_lo = lookupHRegRemap(m, fp_convert->op_lo);
+      fp_convert->r1 = lookupHRegRemap(m, fp_convert->r1);
+      break;
+   }
+
case S390_INSN_MIMM:
s390_amode_map_regs(m, insn->variant.mimm.dst);
break;
}
+/* Emit an insn of format E: nothing but the 2-byte opcode. */
+static UChar *
+emit_E(UChar *p, UInt op)
+{
+   return emit_2bytes(p, (ULong)op);
+}
+
+
+/* Emit the PFPO insn (opcode 0x010a, format E). All of its operands
+   are implicit in GR 0, GR 1 and the FPRs. */
+static UChar *
+s390_emit_PFPO(UChar *p)
+{
+   /* Requires the "perform floating point operation" facility. */
+   vassert(s390_host_has_pfpo);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      s390_disasm(ENC1(MNM), "pfpo");
+   }
+
+   return emit_E(p, 0x010a);
+}
+
+
static UChar *
s390_emit_QADTR(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
{
}
+/* Build an insn converting between a BFP and a DFP value where both
+   operands fit into a single register. R1 is the copy of GPR 1 that
+   PFPO clobbers with its return code. */
+s390_insn *
+s390_insn_fp_convert(UChar size, s390_fp_conv_t tag, HReg dst, HReg op,
+                     HReg r1, s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+   s390_fp_convert *fp_convert = LibVEX_Alloc(sizeof(s390_fp_convert));
+
+   vassert(size == 4 || size == 8);
+
+   fp_convert->tag = tag;
+   fp_convert->dst_hi = dst;
+   fp_convert->dst_lo = INVALID_HREG;   /* no low half for scalar values */
+   fp_convert->op_hi = op;
+   fp_convert->op_lo = INVALID_HREG;
+   fp_convert->r1 = r1;
+   fp_convert->rounding_mode = rounding_mode;
+
+   insn->tag = S390_INSN_FP_CONVERT;
+   insn->size = size;
+   insn->variant.fp_convert.details = fp_convert;
+
+   return insn;
+}
+
+
+/* Build an insn converting between BFP and DFP values where at least
+   one operand occupies a register pair. Invalid HRegs mark unused low
+   halves. R1 is the copy of GPR 1 that PFPO clobbers. */
+s390_insn *
+s390_insn_fp128_convert(UChar size, s390_fp_conv_t tag, HReg dst_hi,
+                        HReg dst_lo, HReg op_hi, HReg op_lo, HReg r1,
+                        s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc(sizeof(s390_insn));
+   s390_fp_convert *fp_convert = LibVEX_Alloc(sizeof(s390_fp_convert));
+
+   vassert(size == 16);
+
+   fp_convert->tag = tag;
+   fp_convert->dst_hi = dst_hi;
+   fp_convert->dst_lo = dst_lo;
+   fp_convert->op_hi = op_hi;
+   fp_convert->op_lo = op_lo;
+   fp_convert->r1 = r1;
+   fp_convert->rounding_mode = rounding_mode;
+
+   insn->tag = S390_INSN_FP_CONVERT;
+   insn->size = size;
+   insn->variant.fp_convert.details = fp_convert;
+
+   return insn;
+}
+
+
s390_insn *
s390_insn_dfp128_binop(UChar size, s390_dfp_binop_t tag, HReg dst_hi,
HReg dst_lo, HReg op2_hi, HReg op2_lo, HReg op3_hi,
insn->variant.dfp_reround.op3_hi);
break;
+   case S390_INSN_FP_CONVERT: {
+      s390_fp_convert *fp_convert = insn->variant.fp_convert.details;
+
+      /* The mnemonic only shows the direction (bfp -> dfp or
+         dfp -> bfp); operand widths are shown by the size suffix. */
+      switch (fp_convert->tag) {
+      case S390_FP_F64_TO_D64:   op = "v-f2d"; break;
+      case S390_FP_D64_TO_F64:   op = "v-d2f"; break;
+      case S390_FP_F64_TO_D128:  op = "v-f2d"; break;
+      case S390_FP_D128_TO_F64:  op = "v-d2f"; break;
+      case S390_FP_F128_TO_D128: op = "v-f2d"; break;
+      case S390_FP_D128_TO_F128: op = "v-d2f"; break;
+      default: goto fail;
+      }
+      /* Only the high halves are printed, even for register pairs. */
+      s390_sprintf(buf, "%M %R,%R", op, fp_convert->dst_hi,
+                   fp_convert->op_hi);
+      break;
+   }
+
case S390_INSN_MFENCE:
s390_sprintf(buf, "%M", "v-mfence");
return buf; /* avoid printing "size = ..." which is meaningless */
goto common;
}
+ case S390_INSN_FP_CONVERT: {
+ s390_fp_convert *fp_convert = insn->variant.fp_convert.details;
+
+ switch (fp_convert->tag) {
+ case S390_FP_F64_TO_D64:
+ case S390_FP_D64_TO_F64:
+ case S390_FP_F64_TO_D128: p += vex_sprintf(p, "8 -> "); goto common;
+ case S390_FP_D128_TO_F64:
+ case S390_FP_F128_TO_D128:
+ case S390_FP_D128_TO_F128: p += vex_sprintf(p, "16 -> "); goto common;
+ default:
+ goto common;
+ }
+ }
+
default:
goto common;
}
}
+/* Emit code for an fp_convert insn: load GR 0 with the PFPO function
+   code and rounding mode, then issue PFPO. The register allocator has
+   already pinned the operands to the FPRs PFPO uses implicitly. */
+static UChar *
+s390_insn_fp_convert_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt pfpo;
+   s390_fp_convert *fp_convert = insn->variant.fp_convert.details;
+   s390_dfp_round_t rm = fp_convert->rounding_mode;
+
+   /* Rounding modes 2..7 are invalid for PFPO. */
+   vassert(rm < 2 || rm > 7);
+
+   /* Function code, pre-shifted into bits [33:55] position. */
+   switch (fp_convert->tag) {
+   case S390_FP_F64_TO_D64:   pfpo = S390_PFPO_F64_TO_D64   << 8; break;
+   case S390_FP_D64_TO_F64:   pfpo = S390_PFPO_D64_TO_F64   << 8; break;
+   case S390_FP_F64_TO_D128:  pfpo = S390_PFPO_F64_TO_D128  << 8; break;
+   case S390_FP_D128_TO_F64:  pfpo = S390_PFPO_D128_TO_F64  << 8; break;
+   case S390_FP_F128_TO_D128: pfpo = S390_PFPO_F128_TO_D128 << 8; break;
+   case S390_FP_D128_TO_F128: pfpo = S390_PFPO_D128_TO_F128 << 8; break;
+   default: goto fail;
+   }
+
+   /* Rounding mode goes into GR 0 bits [60:63]. */
+   pfpo = pfpo | rm;
+   buf = s390_emit_load_32imm(buf, R0, pfpo);
+   buf = s390_emit_PFPO(buf);
+   return buf;
+
+ fail:
+   vpanic("s390_insn_fp_convert_emit");
+}
+
+
static UChar *
s390_insn_mfence_emit(UChar *buf, const s390_insn *insn)
{
end = s390_insn_dfp_reround_emit(buf, insn);
break;
+ case S390_INSN_FP_CONVERT:
+ end = s390_insn_fp_convert_emit(buf, insn);
+ break;
+
case S390_INSN_MFENCE:
end = s390_insn_mfence_emit(buf, insn);
break;
S390_INSN_DFP_COMPARE,
S390_INSN_DFP_CONVERT,
S390_INSN_DFP_REROUND,
+ S390_INSN_FP_CONVERT,
S390_INSN_MFENCE,
S390_INSN_MIMM, /* Assign an immediate constant to a memory location */
S390_INSN_MADD, /* Add a value to a memory location */
S390_DFP_D128_TO_U64
} s390_dfp_conv_t;
+/* The kind of conversions between binary (BFP) and decimal (DFP)
+   floating point values; all are implemented via PFPO. */
+typedef enum {
+   S390_FP_F64_TO_D64,
+   S390_FP_D64_TO_F64,
+   S390_FP_F64_TO_D128,
+   S390_FP_D128_TO_F64,
+   S390_FP_F128_TO_D128,
+   S390_FP_D128_TO_F128
+} s390_fp_conv_t;
+
/* The kind of binary DFP operations */
typedef enum {
S390_DFP_ADD,
HReg op3_lo; /* 128-bit operand low part */
} s390_dfp_binop;
+/* Operand details for a BFP <-> DFP conversion via PFPO. */
+typedef struct {
+   s390_fp_conv_t tag;
+   s390_dfp_round_t rounding_mode;   /* goes into GR 0 bits [60:63] */
+   HReg dst_hi; /* 128-bit result high part; 32/64-bit result */
+   HReg dst_lo; /* 128-bit result low part */
+   HReg op_hi;  /* 128-bit operand high part; 32/64-bit opnd */
+   HReg op_lo;  /* 128-bit operand low part */
+   HReg r1;     /* clobbered register GPR #1 */
+} s390_fp_convert;
+
typedef struct {
s390_insn_tag tag;
/* Usually, this is the size of the result of an operation.
HReg op_hi; /* 128-bit operand high part; 64-bit opnd */
HReg op_lo; /* 128-bit operand low part */
} dfp_convert;
+ struct {
+ s390_fp_convert *details;
+ } fp_convert;
struct {
s390_dfp_cmp_t tag;
HReg dst; /* condition code in s390 encoding */
HReg op, s390_dfp_round_t);
s390_insn *s390_insn_dfp_reround(UChar size, HReg dst, HReg op2, HReg op3,
s390_dfp_round_t);
+s390_insn *s390_insn_fp_convert(UChar size, s390_fp_conv_t tag,
+ HReg dst, HReg op, HReg r1, s390_dfp_round_t);
+s390_insn *s390_insn_fp128_convert(UChar size, s390_fp_conv_t tag,
+ HReg dst_hi, HReg dst_lo, HReg op_hi,
+ HReg op_lo, HReg r1, s390_dfp_round_t);
s390_insn *s390_insn_dfp128_binop(UChar size, s390_dfp_binop_t, HReg dst_hi,
HReg dst_lo, HReg op2_hi, HReg op2_lo,
HReg op3_hi, HReg op3_lo,
(s390_host_hwcaps & (VEX_HWCAPS_S390X_FPEXT))
#define s390_host_has_lsc \
(s390_host_hwcaps & (VEX_HWCAPS_S390X_LSC))
+#define s390_host_has_pfpo \
+ (s390_host_hwcaps & (VEX_HWCAPS_S390X_PFPO))
#endif /* ndef __VEX_HOST_S390_DEFS_H */
/* --------- BINARY OP --------- */
case Iex_Binop: {
- HReg op_hi, op_lo, f12, f13, f14, f15;
+ switch (expr->Iex.Binop.op) {
+ case Iop_SqrtF128: {
+ HReg op_hi, op_lo, f12, f13, f14, f15;
- /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
- f12 = make_fpr(12);
- f13 = make_fpr(13);
- f14 = make_fpr(14);
- f15 = make_fpr(15);
+ /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+ f12 = make_fpr(12);
+ f13 = make_fpr(13);
+ f14 = make_fpr(14);
+ f15 = make_fpr(15);
- switch (expr->Iex.Binop.op) {
- case Iop_SqrtF128:
s390_isel_float128_expr(&op_hi, &op_lo, env, expr->Iex.Binop.arg2);
/* operand --> (f13, f15) */
addInstr(env, s390_insn_move(8, *dst_hi, f12));
addInstr(env, s390_insn_move(8, *dst_lo, f14));
return;
+ }
case Iop_F64HLtoF128:
*dst_hi = s390_isel_float_expr(env, expr->Iex.Binop.arg1);
*dst_lo = s390_isel_float_expr(env, expr->Iex.Binop.arg2);
return;
+   case Iop_D128toF128: {
+      /* DFP ext -> BFP ext via PFPO. The insn implicitly reads the
+         source from FPR pair (4,6) and writes the result to (0,2),
+         so the operand is moved into/out of those fixed registers. */
+      IRExpr *irrm;
+      IRExpr *left;
+      s390_dfp_round_t rm;
+      HReg op_hi, op_lo;
+      HReg f0, f2, f4, f6, r1;      /* real registers used by PFPO */
+
+      f4 = make_fpr(4);             /* source */
+      f6 = make_fpr(6);             /* source */
+      f0 = make_fpr(0);             /* destination */
+      f2 = make_fpr(2);             /* destination */
+      r1 = make_gpr(1);             /* GPR #1 clobbered */
+
+      irrm = expr->Iex.Binop.arg1;
+      left = expr->Iex.Binop.arg2;
+      rm = get_dfp_rounding_mode(env, irrm);
+      s390_isel_dfp128_expr(&op_hi, &op_lo, env, left);
+      /* operand --> (f4, f6) */
+      addInstr(env, s390_insn_move(8, f4, op_hi));
+      addInstr(env, s390_insn_move(8, f6, op_lo));
+      addInstr(env, s390_insn_fp128_convert(16, S390_FP_D128_TO_F128, f0, f2,
+                                            f4, f6, r1, rm));
+      /* (f0, f2) --> destination */
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_move(8, *dst_hi, f0));
+      addInstr(env, s390_insn_move(8, *dst_lo, f2));
+
+      return;
+   }
+
default:
goto irreducible;
}
IRExpr *left = expr->Iex.Binop.arg2;
HReg h1, dst;
s390_bfp_conv_t conv;
+ s390_fp_conv_t fpconv;
switch (op) {
case Iop_SqrtF32:
case Iop_I64StoF64: conv = S390_BFP_I64_TO_F64; goto convert_int;
case Iop_I64UtoF32: conv = S390_BFP_U64_TO_F32; goto convert_int;
case Iop_I64UtoF64: conv = S390_BFP_U64_TO_F64; goto convert_int;
+ case Iop_D64toF64: fpconv = S390_FP_D64_TO_F64; goto convert_dfp;
+ case Iop_D128toF64: fpconv = S390_FP_D128_TO_F64; goto convert_dfp128;
convert_float:
h1 = s390_isel_float_expr(env, left);
rounding_mode));
return dst;
}
-
+
+
+   /* DFP long -> BFP long via PFPO (source in f4, result in f0). */
+   convert_dfp: {
+      s390_dfp_round_t rm;
+      HReg f0, f4, r1;              /* real registers used by PFPO */
+
+      f4 = make_fpr(4);             /* source */
+      f0 = make_fpr(0);             /* destination */
+      r1 = make_gpr(1);             /* GPR #1 clobbered */
+      h1 = s390_isel_dfp_expr(env, left);
+      dst = newVRegF(env);
+      rm = get_dfp_rounding_mode(env, irrm);
+      /* operand --> f4 */
+      addInstr(env, s390_insn_move(8, f4, h1));
+      addInstr(env, s390_insn_fp_convert(size, fpconv, f0, f4, r1, rm));
+      /* f0 --> destination */
+      addInstr(env, s390_insn_move(8, dst, f0));
+      return dst;
+   }
+
+   /* DFP ext -> BFP long via PFPO (source in (f4,f6), result in f0). */
+   convert_dfp128: {
+      s390_dfp_round_t rm;
+      HReg op_hi, op_lo;
+      HReg f0, f4, f6, r1;          /* real registers used by PFPO */
+
+      f4 = make_fpr(4);             /* source */
+      f6 = make_fpr(6);             /* source */
+      f0 = make_fpr(0);             /* destination */
+      r1 = make_gpr(1);             /* GPR #1 clobbered */
+      s390_isel_dfp128_expr(&op_hi, &op_lo, env, left);
+      dst = newVRegF(env);
+      rm = get_dfp_rounding_mode(env, irrm);
+      /* operand --> (f4, f6) */
+      addInstr(env, s390_insn_move(8, f4, op_hi));
+      addInstr(env, s390_insn_move(8, f6, op_lo));
+      /* 64-bit result: no low destination half. */
+      addInstr(env, s390_insn_fp128_convert(16, fpconv, f0, INVALID_HREG,
+                                            f4, f6, r1, rm));
+      /* f0 --> destination */
+      addInstr(env, s390_insn_move(8, dst, f0));
+      return dst;
+   }
+
+
default:
goto irreducible;
return;
}
+   case Iop_F64toD128: {
+      /* BFP long -> DFP ext via PFPO (source in f4, result in (f0,f2)). */
+      IRExpr *irrm;
+      IRExpr *left;
+      s390_dfp_round_t rm;
+      HReg h1;                      /* virtual reg. to hold source */
+      HReg f0, f2, f4, r1;          /* real registers used by PFPO */
+
+      f4 = make_fpr(4);             /* source */
+      f0 = make_fpr(0);             /* destination */
+      f2 = make_fpr(2);             /* destination */
+      r1 = make_gpr(1);             /* GPR #1 clobbered */
+      irrm = expr->Iex.Binop.arg1;
+      left = expr->Iex.Binop.arg2;
+      rm = get_dfp_rounding_mode(env, irrm);
+      h1 = s390_isel_float_expr(env, left);
+      addInstr(env, s390_insn_move(8, f4, h1));
+      /* 64-bit source: no low operand half. */
+      addInstr(env, s390_insn_fp128_convert(16, S390_FP_F64_TO_D128, f0, f2,
+                                            f4, INVALID_HREG, r1, rm));
+      /* (f0, f2) --> destination */
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_move(8, *dst_hi, f0));
+      addInstr(env, s390_insn_move(8, *dst_lo, f2));
+
+      return;
+   }
+
+   case Iop_F128toD128: {
+      /* BFP ext -> DFP ext via PFPO (source in (f4,f6), result in
+         (f0,f2)). */
+      IRExpr *irrm;
+      IRExpr *left;
+      s390_dfp_round_t rm;
+      HReg op_hi, op_lo;
+      HReg f0, f2, f4, f6, r1;      /* real registers used by PFPO */
+
+      f4 = make_fpr(4);             /* source */
+      f6 = make_fpr(6);             /* source */
+      f0 = make_fpr(0);             /* destination */
+      f2 = make_fpr(2);             /* destination */
+      r1 = make_gpr(1);             /* GPR #1 clobbered */
+
+      irrm = expr->Iex.Binop.arg1;
+      left = expr->Iex.Binop.arg2;
+      rm = get_dfp_rounding_mode(env, irrm);
+      s390_isel_float128_expr(&op_hi, &op_lo, env, left);
+      /* operand --> (f4, f6) */
+      addInstr(env, s390_insn_move(8, f4, op_hi));
+      addInstr(env, s390_insn_move(8, f6, op_lo));
+      addInstr(env, s390_insn_fp128_convert(16, S390_FP_F128_TO_D128, f0, f2,
+                                            f4, f6, r1, rm));
+      /* (f0, f2) --> destination */
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_move(8, *dst_hi, f0));
+      addInstr(env, s390_insn_move(8, *dst_lo, f2));
+
+      return;
+   }
+
default:
goto irreducible;
}
IRExpr *left = expr->Iex.Binop.arg2;
HReg h1, dst;
s390_dfp_conv_t conv;
+ s390_fp_conv_t fpconv;
switch (op) {
case Iop_D64toD32: conv = S390_DFP_D64_TO_D32; goto convert_dfp;
case Iop_I64StoD64: conv = S390_DFP_I64_TO_D64; goto convert_int;
case Iop_I64UtoD64: conv = S390_DFP_U64_TO_D64; goto convert_int;
+ case Iop_F64toD64: fpconv = S390_FP_F64_TO_D64; goto convert_bfp;
convert_dfp:
h1 = s390_isel_dfp_expr(env, left);
return dst;
}
+   /* BFP long -> DFP long via PFPO (source in f4, result in f0). */
+   convert_bfp: {
+      s390_dfp_round_t rm;
+      HReg f0, f4, r1;              /* real registers used by PFPO */
+
+      f4 = make_fpr(4);             /* source */
+      f0 = make_fpr(0);             /* destination */
+      r1 = make_gpr(1);             /* GPR #1 clobbered */
+      h1 = s390_isel_float_expr(env, left);
+      dst = newVRegF(env);
+      rm = get_dfp_rounding_mode(env, irrm);
+      /* operand --> f4 */
+      addInstr(env, s390_insn_move(8, f4, h1));
+      addInstr(env, s390_insn_fp_convert(size, fpconv, f0, f4, r1, rm));
+      /* f0 --> destination */
+      addInstr(env, s390_insn_move(8, dst, f0));
+      return dst;
+   }
+
+
case Iop_D128toD64: {
HReg op_hi, op_lo, f13, f15;
s390_dfp_round_t rounding_mode;
return "Encountered an instruction that requires the floating "
"point extension facility.\n"
" That facility is not available on this host";
+ case EmFail_S390X_invalid_PFPO_rounding_mode:
+ return "The rounding mode specified in GPR 0 for PFPO instruction"
+ " is invalid";
+ case EmFail_S390X_invalid_PFPO_function:
+ return "The function code specified in GPR 0 for PFPO instruction"
+ " is invalid";
default:
vpanic("LibVEX_EmNote_string: unknown warning");
}
{ VEX_HWCAPS_S390X_STCKF, "stckf" },
{ VEX_HWCAPS_S390X_FPEXT, "fpext" },
{ VEX_HWCAPS_S390X_LSC, "lsc" },
+ { VEX_HWCAPS_S390X_PFPO, "pfpo" },
};
#define NUM_HWCAPS (sizeof hwcaps_list / sizeof hwcaps_list[0])
static HChar buf[sizeof prefix +
S390_FPC_DFP_ROUND_PREPARE_SHORT = 7
} s390_fpc_dfp_round_t;
+/* PFPO function code as it is encoded in bits [33:55] of GR 0
+   when PFPO insn is executed. Encoding: 0x01 (= convert) in the high
+   byte, then the target operand type, then the source operand type,
+   where BFP long = 0x06, BFP ext = 0x07, DFP long = 0x09 and
+   DFP ext = 0x0a. */
+typedef enum {
+   S390_PFPO_F64_TO_D64   = 0x010906,
+   S390_PFPO_D64_TO_F64   = 0x010609,
+   S390_PFPO_F64_TO_D128  = 0x010A06,
+   S390_PFPO_D128_TO_F64  = 0x01060A,
+   S390_PFPO_F128_TO_D128 = 0x010A07,
+   S390_PFPO_D128_TO_F128 = 0x01070A
+} s390_pfpo_function_t;
/* The length of the longest mnemonic: locgrnhe */
#define S390_MAX_MNEMONIC_LEN 8
#define VEX_HWCAPS_S390X_STCKF (1<<14) /* STCKF facility */
#define VEX_HWCAPS_S390X_FPEXT (1<<15) /* Floating point extension facility */
#define VEX_HWCAPS_S390X_LSC (1<<16) /* Conditional load/store facility */
+#define VEX_HWCAPS_S390X_PFPO (1<<17) /* Perform floating point ops facility */
/* Special value representing all available s390x hwcaps */
#define VEX_HWCAPS_S390X_ALL (VEX_HWCAPS_S390X_LDISP | \
VEX_HWCAPS_S390X_FPEXT | \
VEX_HWCAPS_S390X_LSC | \
VEX_HWCAPS_S390X_ETF3 | \
- VEX_HWCAPS_S390X_ETF2)
+ VEX_HWCAPS_S390X_ETF2 | \
+ VEX_HWCAPS_S390X_PFPO)
#define VEX_HWCAPS_S390X(x) ((x) & ~VEX_S390X_MODEL_MASK)
#define VEX_S390X_MODEL(x) ((x) & VEX_S390X_MODEL_MASK)
available on this host */
EmFail_S390X_fpext,
+ /* GPR 0 contains invalid rounding mode for PFPO instruction */
+ EmFail_S390X_invalid_PFPO_rounding_mode,
+
+ /* The function code specified in GPR 0 executed by PFPO
+ instruction is invalid */
+ EmFail_S390X_invalid_PFPO_function,
+
EmNote_NUMBER
}
VexEmNote;