TRANS(ABS_s, do_scalar1_d, a, tcg_gen_abs_i64)
TRANS(NEG_s, do_scalar1_d, a, tcg_gen_neg_i64)
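+/* Scalar integer compare against zero, 64-bit element only. */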
+static bool do_cmop0_d(DisasContext *s, arg_rr *a, TCGCond cond)
+{
+ if (fp_access_check(s)) {
+ TCGv_i64 t = read_fp_dreg(s, a->rn);
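+ /* 64 bit integer comparison against zero, result is test ? -1 : 0. */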
+ tcg_gen_negsetcond_i64(cond, t, t, tcg_constant_i64(0));
+ write_fp_dreg(s, a->rd, t);
+ }
+ return true;
+}
+
+TRANS(CMGT0_s, do_cmop0_d, a, TCG_COND_GT)
+TRANS(CMGE0_s, do_cmop0_d, a, TCG_COND_GE)
+TRANS(CMLE0_s, do_cmop0_d, a, TCG_COND_LE)
+TRANS(CMLT0_s, do_cmop0_d, a, TCG_COND_LT)
+TRANS(CMEQ0_s, do_cmop0_d, a, TCG_COND_EQ)
+
static bool do_gvec_fn2(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn)
{
if (!a->q && a->esz == MO_64) {
TRANS(NOT_v, do_gvec_fn2, a, tcg_gen_gvec_not)
TRANS(CNT_v, do_gvec_fn2, a, gen_gvec_cnt)
TRANS(RBIT_v, do_gvec_fn2, a, gen_gvec_rbit)
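+/* Vector integer compare against zero, expanded via the gvec helpers. */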
+TRANS(CMGT0_v, do_gvec_fn2, a, gen_gvec_cgt0)
+TRANS(CMGE0_v, do_gvec_fn2, a, gen_gvec_cge0)
+TRANS(CMLT0_v, do_gvec_fn2, a, gen_gvec_clt0)
+TRANS(CMLE0_v, do_gvec_fn2, a, gen_gvec_cle0)
+TRANS(CMEQ0_v, do_gvec_fn2, a, gen_gvec_ceq0)
static bool do_gvec_fn2_bhs(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn)
{
* The caller need only provide tcg_rmode and tcg_fpstatus if the op
* requires them.
*/
- TCGCond cond;
-
switch (opcode) {
- case 0xa: /* CMLT */
- cond = TCG_COND_LT;
- do_cmop:
- /* 64 bit integer comparison against zero, result is test ? -1 : 0. */
- tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_constant_i64(0));
- break;
- case 0x8: /* CMGT, CMGE */
- cond = u ? TCG_COND_GE : TCG_COND_GT;
- goto do_cmop;
- case 0x9: /* CMEQ, CMLE */
- cond = u ? TCG_COND_LE : TCG_COND_EQ;
- goto do_cmop;
case 0x2f: /* FABS */
gen_vfp_absd(tcg_rd, tcg_rn);
break;
case 0x4: /* CLS, CLZ */
case 0x5: /* NOT */
case 0x7: /* SQABS, SQNEG */
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xa: /* CMLT */
case 0xb: /* ABS, NEG */
g_assert_not_reached();
}
TCGv_ptr tcg_fpstatus;
switch (opcode) {
- case 0xa: /* CMLT */
- if (u) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x8: /* CMGT, CMGE */
- case 0x9: /* CMEQ, CMLE */
- if (size != 3) {
- unallocated_encoding(s);
- return;
- }
- break;
case 0x12: /* SQXTUN */
if (!u) {
unallocated_encoding(s);
default:
case 0x3: /* USQADD / SUQADD */
case 0x7: /* SQABS / SQNEG */
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xa: /* CMLT */
case 0xb: /* ABS, NEG */
unallocated_encoding(s);
return;
}
handle_shll(s, is_q, size, rn, rd);
return;
- case 0xa: /* CMLT */
- if (u == 1) {
- unallocated_encoding(s);
- return;
- }
- /* fall through */
- case 0x8: /* CMGT, CMGE */
- case 0x9: /* CMEQ, CMLE */
- if (size == 3 && !is_q) {
- unallocated_encoding(s);
- return;
- }
- break;
case 0xc ... 0xf:
case 0x16 ... 0x1f:
{
case 0x4: /* CLS, CLZ */
case 0x5: /* CNT, NOT, RBIT */
case 0x7: /* SQABS, SQNEG */
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xa: /* CMLT */
case 0xb: /* ABS, NEG */
unallocated_encoding(s);
return;
tcg_rmode = NULL;
}
- switch (opcode) {
- case 0x8: /* CMGT, CMGE */
- if (u) {
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
- } else {
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
- }
- return;
- case 0x9: /* CMEQ, CMLE */
- if (u) {
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
- } else {
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
- }
- return;
- case 0xa: /* CMLT */
- gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
- return;
- case 0x4: /* CLZ, CLS */
- case 0x5: /* CNT, NOT, RBIT */
- case 0xb:
- g_assert_not_reached();
- }
-
if (size == 3) {
/* All 64-bit element operations can be shared with scalar 2misc */
int pass;