Add an expander for isinf using integer arithmetic. This is
typically faster and avoids generating spurious exceptions on
signaling NaNs. This fixes part of PR66462.
int isinf1 (float x) { return __builtin_isinf (x); }
Before:
fabs s0, s0
mov w0, 2139095039
fmov s31, w0
fcmp s0, s31
cset w0, le
eor w0, w0, 1
ret
After:
fmov w1, s0
mov w0, -16777216
cmp w0, w1, lsl 1
cset w0, eq
ret
gcc:
PR middle-end/66462
* config/aarch64/aarch64.md (isinf<mode>2): Add new expander.
* config/aarch64/iterators.md (mantissa_bits): Add new mode_attr.
gcc/testsuite:
PR middle-end/66462
* gcc.target/aarch64/pr66462.c: Add new test.
[(set_attr "type" "fcmp<stype>")]
)
-(define_insn "*cmp_swp_<shift>_reg<mode>"
+(define_insn "cmp_swp_<shift>_reg<mode>"
[(set (reg:CC_SWP CC_REGNUM)
(compare:CC_SWP (ASHIFT:GPI
(match_operand:GPI 0 "register_operand" "r")
}
)
+;; Expand isinf<mode>2 with integer arithmetic instead of an FP compare.
+;; An IEEE value is infinite iff, after shifting out the sign bit, the
+;; exponent field is all-ones and the mantissa is zero, i.e.
+;; (bits << 1) == (-1 << (mantissa_bits + 1)).  Staying on the integer
+;; side avoids raising FE_INVALID on signaling NaNs (PR middle-end/66462).
+(define_expand "isinf<mode>2"
+  [(match_operand:SI 0 "register_operand")
+   (match_operand:GPF 1 "register_operand")]
+  "TARGET_FLOAT"
+{
+  /* Reinterpret the FP input as an integer of the same width.  */
+  rtx op = force_lowpart_subreg (<V_INT_EQUIV>mode, operands[1], <MODE>mode);
+  /* Build the all-ones-exponent mask, pre-shifted past the sign bit.  */
+  rtx tmp = gen_reg_rtx (<V_INT_EQUIV>mode);
+  emit_move_insn (tmp, GEN_INT (HOST_WIDE_INT_M1U << (<mantissa_bits> + 1)));
+  rtx cc_reg = gen_rtx_REG (CC_SWPmode, CC_REGNUM);
+  /* Compare (op << 1) against the mask: equality means infinity.  */
+  emit_insn (gen_cmp_swp_lsl_reg<v_int_equiv> (op, GEN_INT (1), tmp));
+  rtx cmp = gen_rtx_fmt_ee (EQ, SImode, cc_reg, const0_rtx);
+  emit_insn (gen_aarch64_cstoresi (operands[0], cmp, cc_reg));
+  DONE;
+}
+)
+
;; -------------------------------------------------------------------
;; Reload support
;; -------------------------------------------------------------------
(define_mode_attr half_mask [(HI "255") (SI "65535") (DI "4294967295")])
+;; Number of explicit mantissa (fraction) bits in an IEEE scalar FP mode
+;; (binary32 has 23, binary64 has 52; the leading 1 is implicit).
+(define_mode_attr mantissa_bits [(SF "23") (DF "52")])
+
;; For constraints used in scalar immediate vector moves
(define_mode_attr hq [(HI "h") (QI "q")])
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fsignaling-nans -fno-inline" } */
+/* { dg-require-effective-target fenv_exceptions } */
+
+#include <fenv.h>
+#include <stdbool.h>
+
+/* Check that __builtin_isinff returns RES for both X and -X, and that
+   classifying X raised no FE_INVALID exception — the integer-based
+   expansion must not trap even on signaling NaNs.  */
+static void t_inff (float x, bool res)
+{
+  if (__builtin_isinff (x) != res)
+    __builtin_abort ();
+  if (__builtin_isinff (-x) != res)
+    __builtin_abort ();
+  if (fetestexcept (FE_INVALID))
+    __builtin_abort ();
+}
+
+/* Double-precision counterpart of t_inff: __builtin_isinf must return
+   RES for X and -X without raising FE_INVALID.  */
+static void t_inf (double x, bool res)
+{
+  if (__builtin_isinf (x) != res)
+    __builtin_abort ();
+  if (__builtin_isinf (-x) != res)
+    __builtin_abort ();
+  if (fetestexcept (FE_INVALID))
+    __builtin_abort ();
+}
+
+/* Driver: clear FE_INVALID once up front, then verify the isinf
+   classification of zero, a finite value, infinity, a signaling NaN and
+   a quiet NaN, for both float and double.  Each helper also verifies
+   that no FE_INVALID was raised along the way.  */
+int
+main ()
+{
+  feclearexcept (FE_INVALID);
+
+  t_inff (0.0f, 0);
+  t_inff (1.0f, 0);
+  t_inff (__builtin_inff (), 1);
+  t_inff (__builtin_nansf (""), 0);
+  t_inff (__builtin_nanf (""), 0);
+
+  t_inf (0.0, 0);
+  t_inf (1.0, 0);
+  t_inf (__builtin_inf (), 1);
+  t_inf (__builtin_nans (""), 0);
+  t_inf (__builtin_nan (""), 0);
+
+  return 0;
+}