};
TRANS(UQXTN_s, do_2misc_narrow_scalar, a, f_scalar_uqxtn)
+static void gen_fcvtxn_sd(TCGv_i64 d, TCGv_i64 n)
+{
+    /*
+     * 64 bit to 32 bit float conversion
+     * with von Neumann rounding (round to odd)
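+     *
+     * Round to odd forces the low bit of an inexact result to 1,
+     * preserving a sticky bit so that a further narrowing (e.g. on
+     * to half precision) cannot introduce a double-rounding error.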
+     */
+    TCGv_i32 tmp = tcg_temp_new_i32();
+    gen_helper_fcvtx_f64_to_f32(tmp, n, tcg_env);
+    tcg_gen_extu_i32_i64(d, tmp);
+}
+
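+/*
+ * Indexed by destination element size.  FCVTXN only provides the
+ * 64-to-32-bit form, so the MO_8 and MO_16 slots remain NULL.
+ */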
+static ArithOneOp * const f_scalar_fcvtxn[] = {
+    NULL,
+    NULL,
+    gen_fcvtxn_sd,
+};
+TRANS(FCVTXN_s, do_2misc_narrow_scalar, a, f_scalar_fcvtxn)
+
#undef WRAP_ENV
static bool do_gvec_fn2(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn)
    gen_fcvtn_sd,
};
TRANS(FCVTN_v, do_2misc_narrow_vector, a, f_vector_fcvtn)
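+/*
+ * The vector form performs the same per-element 64-to-32-bit
+ * conversion, so the scalar function table is reused.
+ */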
+TRANS(FCVTXN_v, do_2misc_narrow_vector, a, f_scalar_fcvtxn)

static void gen_bfcvtn_hs(TCGv_i64 d, TCGv_i64 n)
{
}
}
-static void handle_2misc_narrow(DisasContext *s, bool scalar,
-                                int opcode, bool u, bool is_q,
-                                int size, int rn, int rd)
-{
-    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
-     * in the source becomes a size element in the destination).
-     */
-    int pass;
-    TCGv_i64 tcg_res[2];
-    int destelt = is_q ? 2 : 0;
-    int passes = scalar ? 1 : 2;
-
-    if (scalar) {
-        tcg_res[1] = tcg_constant_i64(0);
-    }
-
-    for (pass = 0; pass < passes; pass++) {
-        TCGv_i64 tcg_op = tcg_temp_new_i64();
-        NeonGenOne64OpFn *genfn = NULL;
-        NeonGenOne64OpEnvFn *genenvfn = NULL;
-
-        if (scalar) {
-            read_vec_element(s, tcg_op, rn, pass, size + 1);
-        } else {
-            read_vec_element(s, tcg_op, rn, pass, MO_64);
-        }
-        tcg_res[pass] = tcg_temp_new_i64();
-
-        switch (opcode) {
-        case 0x56: /* FCVTXN, FCVTXN2 */
-            {
-                /*
-                 * 64 bit to 32 bit float conversion
-                 * with von Neumann rounding (round to odd)
-                 */
-                TCGv_i32 tmp = tcg_temp_new_i32();
-                assert(size == 2);
-                gen_helper_fcvtx_f64_to_f32(tmp, tcg_op, tcg_env);
-                tcg_gen_extu_i32_i64(tcg_res[pass], tmp);
-            }
-            break;
-        default:
-        case 0x12: /* XTN, SQXTUN */
-        case 0x14: /* SQXTN, UQXTN */
-        case 0x16: /* FCVTN, FCVTN2 */
-        case 0x36: /* BFCVTN, BFCVTN2 */
-            g_assert_not_reached();
-        }
-
-        if (genfn) {
-            genfn(tcg_res[pass], tcg_op);
-        } else if (genenvfn) {
-            genenvfn(tcg_res[pass], tcg_env, tcg_op);
-        }
-    }
-
-    for (pass = 0; pass < 2; pass++) {
-        write_vec_element(s, tcg_res[pass], rd, destelt + pass, MO_32);
-    }
-    clear_vec_high(s, is_q, rd);
-}
-
/* AdvSIMD scalar two reg misc
 *  31 30 29 28     24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
-            if (size == 2) {
-                unallocated_encoding(s);
-                return;
-            }
-            if (!fp_access_check(s)) {
-                return;
-            }
-            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
-            return;
        default:
            unallocated_encoding(s);
            return;
        }
            handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
            return;
-        case 0x56: /* FCVTXN, FCVTXN2 */
-            if (size == 2) {
-                unallocated_encoding(s);
-                return;
-            }
-            if (!fp_access_check(s)) {
-                return;
-            }
-            handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
-            return;
        case 0x17: /* FCVTL, FCVTL2 */
            if (!fp_access_check(s)) {
                return;
        default:
        case 0x16: /* FCVTN, FCVTN2 */
        case 0x36: /* BFCVTN, BFCVTN2 */
+        case 0x56: /* FCVTXN, FCVTXN2 */
            unallocated_encoding(s);
            return;
        }