fold_convert (type, tem));
}
+  /* Fold __complex__ ( x, 0 ) + __complex__ ( 0, y )
+     to __complex__ ( x, y ).  This is not the same for SNaNs or
+     if signed zeros are involved.  */
+  if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0)))
+      && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0)))
+      && COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0)))
+    {
+      tree rtype = TREE_TYPE (TREE_TYPE (arg0));
+      tree arg0r = fold_unary (REALPART_EXPR, rtype, arg0);
+      tree arg0i = fold_unary (IMAGPART_EXPR, rtype, arg0);
+      bool arg0rz = false, arg0iz = false;
+      if ((arg0r && (arg0rz = real_zerop (arg0r)))
+          || (arg0i && (arg0iz = real_zerop (arg0i))))
+        {
+          tree arg1r = fold_unary (REALPART_EXPR, rtype, arg1);
+          tree arg1i = fold_unary (IMAGPART_EXPR, rtype, arg1);
+          if (arg0rz && arg1i && real_zerop (arg1i))
+            {
+              tree rp = arg1r ? arg1r
+                          : build1 (REALPART_EXPR, rtype, arg1);
+              tree ip = arg0i ? arg0i
+                          : build1 (IMAGPART_EXPR, rtype, arg0);
+              return fold_build2 (COMPLEX_EXPR, type, rp, ip);
+            }
+          else if (arg0iz && arg1r && real_zerop (arg1r))
+            {
+              tree rp = arg0r ? arg0r
+                          : build1 (REALPART_EXPR, rtype, arg0);
+              tree ip = arg1i ? arg1i
+                          : build1 (IMAGPART_EXPR, rtype, arg1);
+              return fold_build2 (COMPLEX_EXPR, type, rp, ip);
+            }
+        }
+    }
+
if (flag_unsafe_math_optimizations
&& (TREE_CODE (arg0) == RDIV_EXPR || TREE_CODE (arg0) == MULT_EXPR)
&& (TREE_CODE (arg1) == RDIV_EXPR || TREE_CODE (arg1) == MULT_EXPR)
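
For reference, a minimal source-level sketch of what the first hunk now
catches (the function name is made up for illustration; 1.0i is the GNU
imaginary-constant extension):

    /* With -fno-signaling-nans and -fno-signed-zeros in effect, the
       addition below can now be folded to a single COMPLEX_EXPR <x, y>
       instead of a component-wise complex addition.  */
    _Complex double
    make_complex (double x, double y)
    {
      return x + y * 1.0i;  /* __complex__ (x, 0) + __complex__ (0, y) */
    }
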
}
}
+  /* Fold z * +-I to __complex__ (-+__imag z, +-__real z).
+     This is not the same for NaNs or if signed zeros are
+     involved.  */
+  if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0)))
+      && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0)))
+      && COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0))
+      && TREE_CODE (arg1) == COMPLEX_CST
+      && real_zerop (TREE_REALPART (arg1)))
+    {
+      tree rtype = TREE_TYPE (TREE_TYPE (arg0));
+      if (real_onep (TREE_IMAGPART (arg1)))
+        return fold_build2 (COMPLEX_EXPR, type,
+                            negate_expr (fold_build1 (IMAGPART_EXPR,
+                                                      rtype, arg0)),
+                            fold_build1 (REALPART_EXPR, rtype, arg0));
+      else if (real_minus_onep (TREE_IMAGPART (arg1)))
+        return fold_build2 (COMPLEX_EXPR, type,
+                            fold_build1 (IMAGPART_EXPR, rtype, arg0),
+                            negate_expr (fold_build1 (REALPART_EXPR,
+                                                      rtype, arg0)));
+    }
+
/* Optimize z * conj(z) for floating point complex numbers.
Guarded by flag_unsafe_math_optimizations as non-finite
imaginary components don't produce scalar results. */
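
Likewise a minimal sketch of what the second hunk catches (again with a
made-up function name): with NaNs and signed zeros out of the picture,
multiplying by +-I reduces to a component swap plus one negation, so no
complex multiply needs to be emitted:

    /* z * I -> __complex__ (-__imag__ z, __real__ z); this is the
       pattern exercised by test4/test5 in the new testcase below.  */
    _Complex double
    rotate90 (_Complex double z)
    {
      return z * 1.0i;
    }
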
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-funsafe-math-optimizations -ffinite-math-only -fdump-tree-gimple" } */
+
+_Complex double test1 (double x) { return x + 1.i; }
+_Complex double test2 (double x) { return 1 + x * 1.i; }
+_Complex double test3 (double x, double y) { return x + y * 1.i; }
+_Complex double test4 (double x, double y) { return (x + y * 1.i) * 1.i; }
+_Complex double test5 (double x, double y) { return (x + y * 1.i) * -1.i; }
+
+/* { dg-final { scan-tree-dump "COMPLEX_EXPR <x, 1.0e\\+0>" "gimple" } } */
+/* { dg-final { scan-tree-dump "COMPLEX_EXPR <1.0e\\+0, x>" "gimple" } } */
+/* { dg-final { scan-tree-dump "COMPLEX_EXPR <x, y>" "gimple" } } */
+/* { dg-final { scan-tree-dump "D.* = -y;\n.*COMPLEX_EXPR <D.*, x>" "gimple" } } */
+/* { dg-final { scan-tree-dump "D.* = -x;\n.*COMPLEX_EXPR <y, D.*>" "gimple" } } */
+/* { dg-final { cleanup-tree-dump "gimple" } } */
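
As a side note on why both hunks check HONOR_SIGNED_ZEROS: the fold
copies one operand's component through verbatim instead of performing
the addition, which is wrong when zero signs matter.  A hypothetical
demonstration program, not part of the patch (compile without
-fno-signed-zeros):

    /* Under IEEE 754 round-to-nearest, (+0.0) + (-0.0) is +0.0, but
       the fold would propagate arg1's real part (-0.0) unchanged.  */
    #include <stdio.h>

    int
    main (void)
    {
      _Complex double a = 0.0;  /* __complex__ (+0.0, +0.0) */
      _Complex double b = 0.0;
      __real__ b = -0.0;        /* build __complex__ (-0.0, 1.0) */
      __imag__ b = 1.0;
      _Complex double c = a + b;
      /* Prints 1: the real part of the sum must be +0.0, not -0.0.  */
      printf ("%g\n", __builtin_copysign (1.0, __real__ c));
      return 0;
    }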