]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/match.pd
re PR target/88234 (UBsan and runtime error: signed integer overflow using unsigned...
[thirdparty/gcc.git] / gcc / match.pd
CommitLineData
3d2cf79f
RB
1/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
4
85ec4feb 5 Copyright (C) 2014-2018 Free Software Foundation, Inc.
3d2cf79f
RB
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
8
9This file is part of GCC.
10
11GCC is free software; you can redistribute it and/or modify it under
12the terms of the GNU General Public License as published by the Free
13Software Foundation; either version 3, or (at your option) any later
14version.
15
16GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17WARRANTY; without even the implied warranty of MERCHANTABILITY or
18FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19for more details.
20
21You should have received a copy of the GNU General Public License
22along with GCC; see the file COPYING3. If not see
23<http://www.gnu.org/licenses/>. */
24
25
26/* Generic tree predicates we inherit. */
27(define_predicates
cc7b5acf 28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
53a19317 29 integer_each_onep integer_truep integer_nonzerop
cc7b5acf 30 real_zerop real_onep real_minus_onep
b0eb889b 31 zerop
f3582e54 32 CONSTANT_CLASS_P
887ab609 33 tree_expr_nonnegative_p
e36c1cfe 34 tree_expr_nonzero_p
67dbe582 35 integer_valued_real_p
53a19317
RB
36 integer_pow2p
37 HONOR_NANS)
e0ee10ed 38
f84e7fd6
RB
39/* Operator lists. */
40(define_operator_list tcc_comparison
41 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
42(define_operator_list inverted_tcc_comparison
43 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
44(define_operator_list inverted_tcc_comparison_with_nans
45 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
534bd33b
MG
46(define_operator_list swapped_tcc_comparison
47 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
07cdc2b8
RB
48(define_operator_list simple_comparison lt le eq ne ge gt)
49(define_operator_list swapped_simple_comparison gt ge eq ne le lt)
50
b1dc4a20 51#include "cfn-operators.pd"
257aecb4 52
543a9bcd
RS
53/* Define operand lists for math rounding functions {,i,l,ll}FN,
54 where the versions prefixed with "i" return an int, those prefixed with
55 "l" return a long and those prefixed with "ll" return a long long.
56
57 Also define operand lists:
58
59 X<FN>F for all float functions, in the order i, l, ll
60 X<FN> for all double functions, in the same order
61 X<FN>L for all long double functions, in the same order. */
62#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
543a9bcd
RS
63 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
64 BUILT_IN_L##FN##F \
65 BUILT_IN_LL##FN##F) \
66 (define_operator_list X##FN BUILT_IN_I##FN \
67 BUILT_IN_L##FN \
68 BUILT_IN_LL##FN) \
69 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
70 BUILT_IN_L##FN##L \
71 BUILT_IN_LL##FN##L)
72
543a9bcd
RS
73DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
74DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
75DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
76DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
0d2b3bca
RS
77
78/* Binary operations and their associated IFN_COND_* function. */
79(define_operator_list UNCOND_BINARY
80 plus minus
6c4fd4a9 81 mult trunc_div trunc_mod rdiv
0d2b3bca
RS
82 min max
83 bit_and bit_ior bit_xor)
84(define_operator_list COND_BINARY
85 IFN_COND_ADD IFN_COND_SUB
6c4fd4a9 86 IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
0d2b3bca
RS
87 IFN_COND_MIN IFN_COND_MAX
88 IFN_COND_AND IFN_COND_IOR IFN_COND_XOR)
b41d1f6e
RS
89
90/* Same for ternary operations. */
91(define_operator_list UNCOND_TERNARY
92 IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
93(define_operator_list COND_TERNARY
94 IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
03cc70b5 95
ed73f46f
MG
96/* As opposed to convert?, this still creates a single pattern, so
97 it is not a suitable replacement for convert? in all cases. */
98(match (nop_convert @0)
99 (convert @0)
100 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
101(match (nop_convert @0)
102 (view_convert @0)
103 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
928686b1
RS
104 && known_eq (TYPE_VECTOR_SUBPARTS (type),
105 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
ed73f46f
MG
106 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
107/* This one has to be last, or it shadows the others. */
108(match (nop_convert @0)
03cc70b5 109 @0)
f84e7fd6 110
e197e64e
KV
111/* Transform likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>
112 ABSU_EXPR returns unsigned absolute value of the operand and the operand
113 of the ABSU_EXPR will have the corresponding signed type. */
114(simplify (abs (convert @0))
115 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
116 && !TYPE_UNSIGNED (TREE_TYPE (@0))
117 && element_precision (type) > element_precision (TREE_TYPE (@0)))
118 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
119 (convert (absu:utype @0)))))
120
121
e0ee10ed 122/* Simplifications of operations with one constant operand and
36a60e48 123 simplifications to constants or single values. */
e0ee10ed
RB
124
125(for op (plus pointer_plus minus bit_ior bit_xor)
126 (simplify
127 (op @0 integer_zerop)
128 (non_lvalue @0)))
129
a499aac5
RB
130/* 0 +p index -> (type)index */
131(simplify
132 (pointer_plus integer_zerop @1)
133 (non_lvalue (convert @1)))
134
d43177ad
MG
135/* ptr - 0 -> (type)ptr */
136(simplify
137 (pointer_diff @0 integer_zerop)
138 (convert @0))
139
a7f24614
RB
140/* See if ARG1 is zero and X + ARG1 reduces to X.
141 Likewise if the operands are reversed. */
142(simplify
143 (plus:c @0 real_zerop@1)
144 (if (fold_real_zero_addition_p (type, @1, 0))
145 (non_lvalue @0)))
146
147/* See if ARG1 is zero and X - ARG1 reduces to X. */
148(simplify
149 (minus @0 real_zerop@1)
150 (if (fold_real_zero_addition_p (type, @1, 1))
151 (non_lvalue @0)))
152
e0ee10ed
RB
153/* Simplify x - x.
154 This is unsafe for certain floats even in non-IEEE formats.
155 In IEEE, it is unsafe because it does wrong for NaNs.
156 Also note that operand_equal_p is always false if an operand
157 is volatile. */
158(simplify
a7f24614 159 (minus @0 @0)
1b457aa4 160 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
a7f24614 161 { build_zero_cst (type); }))
1af4ebf5
MG
162(simplify
163 (pointer_diff @@0 @0)
164 { build_zero_cst (type); })
e0ee10ed
RB
165
166(simplify
a7f24614
RB
167 (mult @0 integer_zerop@1)
168 @1)
169
170/* Maybe fold x * 0 to 0. The expressions aren't the same
171 when x is NaN, since x * 0 is also NaN. Nor are they the
172 same in modes with signed zeros, since multiplying a
173 negative value by 0 gives -0, not +0. */
174(simplify
175 (mult @0 real_zerop@1)
8b5ee871 176 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
a7f24614
RB
177 @1))
178
179/* In IEEE floating point, x*1 is not equivalent to x for snans.
180 Likewise for complex arithmetic with signed zeros. */
181(simplify
182 (mult @0 real_onep)
8b5ee871
MG
183 (if (!HONOR_SNANS (type)
184 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
185 || !COMPLEX_FLOAT_TYPE_P (type)))
186 (non_lvalue @0)))
187
188/* Transform x * -1.0 into -x. */
189(simplify
190 (mult @0 real_minus_onep)
8b5ee871
MG
191 (if (!HONOR_SNANS (type)
192 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
193 || !COMPLEX_FLOAT_TYPE_P (type)))
194 (negate @0)))
e0ee10ed 195
8c2805bb
AP
196(for cmp (gt ge lt le)
197 outp (convert convert negate negate)
198 outn (negate negate convert convert)
199 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
200 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
201 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
202 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
203 (simplify
204 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
205 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
206 && types_match (type, TREE_TYPE (@0)))
207 (switch
208 (if (types_match (type, float_type_node))
209 (BUILT_IN_COPYSIGNF @1 (outp @0)))
210 (if (types_match (type, double_type_node))
211 (BUILT_IN_COPYSIGN @1 (outp @0)))
212 (if (types_match (type, long_double_type_node))
213 (BUILT_IN_COPYSIGNL @1 (outp @0))))))
214 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
215 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
216 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
217 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
218 (simplify
219 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
220 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
221 && types_match (type, TREE_TYPE (@0)))
222 (switch
223 (if (types_match (type, float_type_node))
224 (BUILT_IN_COPYSIGNF @1 (outn @0)))
225 (if (types_match (type, double_type_node))
226 (BUILT_IN_COPYSIGN @1 (outn @0)))
227 (if (types_match (type, long_double_type_node))
228 (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
229
230/* Transform X * copysign (1.0, X) into abs(X). */
231(simplify
c6cfa2bf 232 (mult:c @0 (COPYSIGN_ALL real_onep @0))
8c2805bb
AP
233 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
234 (abs @0)))
235
236/* Transform X * copysign (1.0, -X) into -abs(X). */
237(simplify
c6cfa2bf 238 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
8c2805bb
AP
239 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
240 (negate (abs @0))))
241
242/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
243(simplify
c6cfa2bf 244 (COPYSIGN_ALL REAL_CST@0 @1)
8c2805bb 245 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
c6cfa2bf 246 (COPYSIGN_ALL (negate @0) @1)))
8c2805bb 247
5b7f6ed0 248/* X * 1, X / 1 -> X. */
e0ee10ed
RB
249(for op (mult trunc_div ceil_div floor_div round_div exact_div)
250 (simplify
251 (op @0 integer_onep)
252 (non_lvalue @0)))
253
71f82be9
JG
254/* (A / (1 << B)) -> (A >> B).
255 Only for unsigned A. For signed A, this would not preserve rounding
256 toward zero.
257 For example: (-1 / ( 1 << B)) != -1 >> B. */
258(simplify
259 (trunc_div @0 (lshift integer_onep@1 @2))
260 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
261 && (!VECTOR_TYPE_P (type)
262 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
263 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
264 (rshift @0 @2)))
265
5b7f6ed0
MG
266/* Preserve explicit divisions by 0: the C++ front-end wants to detect
267 undefined behavior in constexpr evaluation, and assuming that the division
268 traps enables better optimizations than these anyway. */
a7f24614 269(for div (trunc_div ceil_div floor_div round_div exact_div)
5b7f6ed0
MG
270 /* 0 / X is always zero. */
271 (simplify
272 (div integer_zerop@0 @1)
273 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
274 (if (!integer_zerop (@1))
275 @0))
da186c1f 276 /* X / -1 is -X. */
a7f24614 277 (simplify
09240451
MG
278 (div @0 integer_minus_onep@1)
279 (if (!TYPE_UNSIGNED (type))
da186c1f 280 (negate @0)))
5b7f6ed0
MG
281 /* X / X is one. */
282 (simplify
283 (div @0 @0)
9ebce098
JJ
284 /* But not for 0 / 0 so that we can get the proper warnings and errors.
285 And not for _Fract types where we can't build 1. */
286 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
5b7f6ed0 287 { build_one_cst (type); }))
03cc70b5 288 /* X / abs (X) is X < 0 ? -1 : 1. */
da186c1f 289 (simplify
d96a5585
RB
290 (div:C @0 (abs @0))
291 (if (INTEGRAL_TYPE_P (type)
da186c1f
RB
292 && TYPE_OVERFLOW_UNDEFINED (type))
293 (cond (lt @0 { build_zero_cst (type); })
294 { build_minus_one_cst (type); } { build_one_cst (type); })))
295 /* X / -X is -1. */
296 (simplify
d96a5585 297 (div:C @0 (negate @0))
da186c1f
RB
298 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
299 && TYPE_OVERFLOW_UNDEFINED (type))
300 { build_minus_one_cst (type); })))
a7f24614
RB
301
302/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
303 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
304(simplify
305 (floor_div @0 @1)
09240451
MG
306 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
307 && TYPE_UNSIGNED (type))
a7f24614
RB
308 (trunc_div @0 @1)))
309
28093105
RB
310/* Combine two successive divisions. Note that combining ceil_div
311 and floor_div is trickier and combining round_div even more so. */
312(for div (trunc_div exact_div)
c306cfaf
RB
313 (simplify
314 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
315 (with {
4a669ac3 316 wi::overflow_type overflow;
8e6cdc90 317 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
4a669ac3 318 TYPE_SIGN (type), &overflow);
c306cfaf 319 }
4a669ac3 320 (if (!overflow)
8fdc6c67
RB
321 (div @0 { wide_int_to_tree (type, mul); })
322 (if (TYPE_UNSIGNED (type)
323 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
324 { build_zero_cst (type); })))))
c306cfaf 325
288fe52e
AM
326/* Combine successive multiplications. Similar to above, but handling
327 overflow is different. */
328(simplify
329 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
330 (with {
4a669ac3 331 wi::overflow_type overflow;
8e6cdc90 332 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
4a669ac3 333 TYPE_SIGN (type), &overflow);
288fe52e
AM
334 }
335 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
336 otherwise undefined overflow implies that @0 must be zero. */
4a669ac3 337 (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
288fe52e
AM
338 (mult @0 { wide_int_to_tree (type, mul); }))))
339
a7f24614 340/* Optimize A / A to 1.0 if we don't care about
09240451 341 NaNs or Infinities. */
a7f24614
RB
342(simplify
343 (rdiv @0 @0)
09240451 344 (if (FLOAT_TYPE_P (type)
1b457aa4 345 && ! HONOR_NANS (type)
8b5ee871 346 && ! HONOR_INFINITIES (type))
09240451
MG
347 { build_one_cst (type); }))
348
349/* Optimize -A / A to -1.0 if we don't care about
350 NaNs or Infinities. */
351(simplify
e04d2a35 352 (rdiv:C @0 (negate @0))
09240451 353 (if (FLOAT_TYPE_P (type)
1b457aa4 354 && ! HONOR_NANS (type)
8b5ee871 355 && ! HONOR_INFINITIES (type))
09240451 356 { build_minus_one_cst (type); }))
a7f24614 357
8c6961ca
PK
358/* PR71078: x / abs(x) -> copysign (1.0, x) */
359(simplify
360 (rdiv:C (convert? @0) (convert? (abs @0)))
361 (if (SCALAR_FLOAT_TYPE_P (type)
362 && ! HONOR_NANS (type)
363 && ! HONOR_INFINITIES (type))
364 (switch
365 (if (types_match (type, float_type_node))
366 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
367 (if (types_match (type, double_type_node))
368 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
369 (if (types_match (type, long_double_type_node))
370 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
371
a7f24614
RB
372/* In IEEE floating point, x/1 is not equivalent to x for snans. */
373(simplify
374 (rdiv @0 real_onep)
8b5ee871 375 (if (!HONOR_SNANS (type))
a7f24614
RB
376 (non_lvalue @0)))
377
378/* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
379(simplify
380 (rdiv @0 real_minus_onep)
8b5ee871 381 (if (!HONOR_SNANS (type))
a7f24614
RB
382 (negate @0)))
383
5711ac88 384(if (flag_reciprocal_math)
81825e28 385 /* Convert (A/B)/C to A/(B*C). */
5711ac88
N
386 (simplify
387 (rdiv (rdiv:s @0 @1) @2)
81825e28
WD
388 (rdiv @0 (mult @1 @2)))
389
390 /* Canonicalize x / (C1 * y) to (x * C2) / y. */
391 (simplify
392 (rdiv @0 (mult:s @1 REAL_CST@2))
393 (with
394 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
395 (if (tem)
396 (rdiv (mult @0 { tem; } ) @1))))
5711ac88
N
397
398 /* Convert A/(B/C) to (A/B)*C */
399 (simplify
400 (rdiv @0 (rdiv:s @1 @2))
401 (mult (rdiv @0 @1) @2)))
402
6a435314
WD
403/* Simplify x / (- y) to -x / y. */
404(simplify
405 (rdiv @0 (negate @1))
406 (rdiv (negate @0) @1))
407
5e21d765
WD
408(if (flag_unsafe_math_optimizations)
409 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
410 Since C / x may underflow to zero, do this only for unsafe math. */
411 (for op (lt le gt ge)
412 neg_op (gt ge lt le)
413 (simplify
414 (op (rdiv REAL_CST@0 @1) real_zerop@2)
415 (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
416 (switch
417 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
418 (op @1 @2))
419 /* For C < 0, use the inverted operator. */
420 (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
421 (neg_op @1 @2)))))))
422
5711ac88
N
423/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
424(for div (trunc_div ceil_div floor_div round_div exact_div)
425 (simplify
426 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
427 (if (integer_pow2p (@2)
428 && tree_int_cst_sgn (@2) > 0
a1488398 429 && tree_nop_conversion_p (type, TREE_TYPE (@0))
8e6cdc90
RS
430 && wi::to_wide (@2) + wi::to_wide (@1) == 0)
431 (rshift (convert @0)
432 { build_int_cst (integer_type_node,
433 wi::exact_log2 (wi::to_wide (@2))); }))))
5711ac88 434
a7f24614
RB
435/* If ARG1 is a constant, we can convert this to a multiply by the
436 reciprocal. This does not have the same rounding properties,
437 so only do this if -freciprocal-math. We can actually
438 always safely do it if ARG1 is a power of two, but it's hard to
439 tell if it is or not in a portable manner. */
440(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
441 (simplify
442 (rdiv @0 cst@1)
443 (if (optimize)
53bc4b3a
RB
444 (if (flag_reciprocal_math
445 && !real_zerop (@1))
a7f24614 446 (with
249700b5 447 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
a7f24614 448 (if (tem)
8fdc6c67
RB
449 (mult @0 { tem; } )))
450 (if (cst != COMPLEX_CST)
451 (with { tree inverse = exact_inverse (type, @1); }
452 (if (inverse)
453 (mult @0 { inverse; } ))))))))
a7f24614 454
a7f24614 455(for mod (ceil_mod floor_mod round_mod trunc_mod)
e0ee10ed
RB
456 /* 0 % X is always zero. */
457 (simplify
a7f24614 458 (mod integer_zerop@0 @1)
e0ee10ed
RB
459 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
460 (if (!integer_zerop (@1))
461 @0))
462 /* X % 1 is always zero. */
463 (simplify
a7f24614
RB
464 (mod @0 integer_onep)
465 { build_zero_cst (type); })
466 /* X % -1 is zero. */
467 (simplify
09240451
MG
468 (mod @0 integer_minus_onep@1)
469 (if (!TYPE_UNSIGNED (type))
bc4315fb 470 { build_zero_cst (type); }))
5b7f6ed0
MG
471 /* X % X is zero. */
472 (simplify
473 (mod @0 @0)
474 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
475 (if (!integer_zerop (@0))
476 { build_zero_cst (type); }))
bc4315fb
MG
477 /* (X % Y) % Y is just X % Y. */
478 (simplify
479 (mod (mod@2 @0 @1) @1)
98e30e51
RB
480 @2)
481 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
482 (simplify
483 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
484 (if (ANY_INTEGRAL_TYPE_P (type)
485 && TYPE_OVERFLOW_UNDEFINED (type)
8e6cdc90
RS
486 && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
487 TYPE_SIGN (type)))
392750c5
JJ
488 { build_zero_cst (type); }))
489 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
490 modulo and comparison, since it is simpler and equivalent. */
491 (for cmp (eq ne)
492 (simplify
493 (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
494 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
495 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
496 (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
a7f24614
RB
497
498/* X % -C is the same as X % C. */
499(simplify
500 (trunc_mod @0 INTEGER_CST@1)
501 (if (TYPE_SIGN (type) == SIGNED
502 && !TREE_OVERFLOW (@1)
8e6cdc90 503 && wi::neg_p (wi::to_wide (@1))
a7f24614
RB
504 && !TYPE_OVERFLOW_TRAPS (type)
505 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
506 && !sign_bit_p (@1, @1))
507 (trunc_mod @0 (negate @1))))
e0ee10ed 508
8f0c696a
RB
509/* X % -Y is the same as X % Y. */
510(simplify
511 (trunc_mod @0 (convert? (negate @1)))
a2a743a1
MP
512 (if (INTEGRAL_TYPE_P (type)
513 && !TYPE_UNSIGNED (type)
8f0c696a 514 && !TYPE_OVERFLOW_TRAPS (type)
20b8d734
JJ
515 && tree_nop_conversion_p (type, TREE_TYPE (@1))
516 /* Avoid this transformation if X might be INT_MIN or
517 Y might be -1, because we would then change valid
518 INT_MIN % -(-1) into invalid INT_MIN % -1. */
8e6cdc90 519 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
20b8d734
JJ
520 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
521 (TREE_TYPE (@1))))))
8f0c696a
RB
522 (trunc_mod @0 (convert @1))))
523
f461569a
MP
524/* X - (X / Y) * Y is the same as X % Y. */
525(simplify
2eef1fc1
RB
526 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
527 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
fba46f03 528 (convert (trunc_mod @0 @1))))
f461569a 529
8f0c696a
RB
530/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
531 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
532 Also optimize A % (C << N) where C is a power of 2,
533 to A & ((C << N) - 1). */
534(match (power_of_two_cand @1)
535 INTEGER_CST@1)
536(match (power_of_two_cand @1)
537 (lshift INTEGER_CST@1 @2))
538(for mod (trunc_mod floor_mod)
539 (simplify
4ab1e111 540 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
8f0c696a
RB
541 (if ((TYPE_UNSIGNED (type)
542 || tree_expr_nonnegative_p (@0))
4ab1e111 543 && tree_nop_conversion_p (type, TREE_TYPE (@3))
8f0c696a 544 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
4ab1e111 545 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
8f0c696a 546
887ab609
N
547/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
548(simplify
549 (trunc_div (mult @0 integer_pow2p@1) @1)
550 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
551 (bit_and @0 { wide_int_to_tree
8e6cdc90
RS
552 (type, wi::mask (TYPE_PRECISION (type)
553 - wi::exact_log2 (wi::to_wide (@1)),
887ab609
N
554 false, TYPE_PRECISION (type))); })))
555
5f8d832e
N
556/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
557(simplify
558 (mult (trunc_div @0 integer_pow2p@1) @1)
559 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
560 (bit_and @0 (negate @1))))
561
95765f36
N
562/* Simplify (t * 2) / 2) -> t. */
563(for div (trunc_div ceil_div floor_div round_div exact_div)
564 (simplify
55d84e61 565 (div (mult:c @0 @1) @1)
95765f36
N
566 (if (ANY_INTEGRAL_TYPE_P (type)
567 && TYPE_OVERFLOW_UNDEFINED (type))
568 @0)))
569
d202f9bd 570(for op (negate abs)
9b054b08
RS
571 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
572 (for coss (COS COSH)
573 (simplify
574 (coss (op @0))
575 (coss @0)))
576 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
577 (for pows (POW)
578 (simplify
579 (pows (op @0) REAL_CST@1)
580 (with { HOST_WIDE_INT n; }
581 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
5d3498b4 582 (pows @0 @1)))))
de3fbea3
RB
583 /* Likewise for powi. */
584 (for pows (POWI)
585 (simplify
586 (pows (op @0) INTEGER_CST@1)
8e6cdc90 587 (if ((wi::to_wide (@1) & 1) == 0)
de3fbea3 588 (pows @0 @1))))
5d3498b4
RS
589 /* Strip negate and abs from both operands of hypot. */
590 (for hypots (HYPOT)
591 (simplify
592 (hypots (op @0) @1)
593 (hypots @0 @1))
594 (simplify
595 (hypots @0 (op @1))
596 (hypots @0 @1)))
597 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
c6cfa2bf 598 (for copysigns (COPYSIGN_ALL)
5d3498b4
RS
599 (simplify
600 (copysigns (op @0) @1)
601 (copysigns @0 @1))))
602
603/* abs(x)*abs(x) -> x*x. Should be valid for all types. */
604(simplify
605 (mult (abs@1 @0) @1)
606 (mult @0 @0))
607
64f7ea7c
KV
608/* Convert absu(x)*absu(x) -> x*x. */
609(simplify
610 (mult (absu@1 @0) @1)
611 (mult (convert@2 @0) @2))
612
5d3498b4
RS
613/* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
614(for coss (COS COSH)
615 copysigns (COPYSIGN)
616 (simplify
617 (coss (copysigns @0 @1))
618 (coss @0)))
619
620/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
621(for pows (POW)
622 copysigns (COPYSIGN)
623 (simplify
de3fbea3 624 (pows (copysigns @0 @2) REAL_CST@1)
5d3498b4
RS
625 (with { HOST_WIDE_INT n; }
626 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
627 (pows @0 @1)))))
de3fbea3
RB
628/* Likewise for powi. */
629(for pows (POWI)
630 copysigns (COPYSIGN)
631 (simplify
632 (pows (copysigns @0 @2) INTEGER_CST@1)
8e6cdc90 633 (if ((wi::to_wide (@1) & 1) == 0)
de3fbea3 634 (pows @0 @1))))
5d3498b4
RS
635
636(for hypots (HYPOT)
637 copysigns (COPYSIGN)
638 /* hypot(copysign(x, y), z) -> hypot(x, z). */
639 (simplify
640 (hypots (copysigns @0 @1) @2)
641 (hypots @0 @2))
642 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
643 (simplify
644 (hypots @0 (copysigns @1 @2))
645 (hypots @0 @1)))
646
eeb57981 647/* copysign(x, CST) -> [-]abs (x). */
c6cfa2bf 648(for copysigns (COPYSIGN_ALL)
eeb57981
RB
649 (simplify
650 (copysigns @0 REAL_CST@1)
651 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
652 (negate (abs @0))
653 (abs @0))))
654
5d3498b4 655/* copysign(copysign(x, y), z) -> copysign(x, z). */
c6cfa2bf 656(for copysigns (COPYSIGN_ALL)
5d3498b4
RS
657 (simplify
658 (copysigns (copysigns @0 @1) @2)
659 (copysigns @0 @2)))
660
661/* copysign(x,y)*copysign(x,y) -> x*x. */
c6cfa2bf 662(for copysigns (COPYSIGN_ALL)
5d3498b4
RS
663 (simplify
664 (mult (copysigns@2 @0 @1) @2)
665 (mult @0 @0)))
666
667/* ccos(-x) -> ccos(x). Similarly for ccosh. */
668(for ccoss (CCOS CCOSH)
669 (simplify
670 (ccoss (negate @0))
671 (ccoss @0)))
d202f9bd 672
abcc43f5
RS
673/* cabs(-x) and cos(conj(x)) -> cabs(x). */
674(for ops (conj negate)
675 (for cabss (CABS)
676 (simplify
677 (cabss (ops @0))
678 (cabss @0))))
679
0a8f32b8
RB
680/* Fold (a * (1 << b)) into (a << b) */
681(simplify
682 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
683 (if (! FLOAT_TYPE_P (type)
9ff6fb6e 684 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
0a8f32b8
RB
685 (lshift @0 @2)))
686
4349b15f
SD
687/* Fold (1 << (C - x)) where C = precision(type) - 1
688 into ((1 << C) >> x). */
689(simplify
690 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
691 (if (INTEGRAL_TYPE_P (type)
56ccfbd6 692 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
4349b15f
SD
693 && single_use (@1))
694 (if (TYPE_UNSIGNED (type))
695 (rshift (lshift @0 @2) @3)
696 (with
697 { tree utype = unsigned_type_for (type); }
698 (convert (rshift (lshift (convert:utype @0) @2) @3))))))
699
0a8f32b8
RB
700/* Fold (C1/X)*C2 into (C1*C2)/X. */
701(simplify
ff86345f
RB
702 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
703 (if (flag_associative_math
704 && single_use (@3))
0a8f32b8
RB
705 (with
706 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
707 (if (tem)
708 (rdiv { tem; } @1)))))
709
710/* Simplify ~X & X as zero. */
711(simplify
712 (bit_and:c (convert? @0) (convert? (bit_not @0)))
713 { build_zero_cst (type); })
714
89b80c42
PK
715/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
716(simplify
717 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
718 (if (TYPE_UNSIGNED (type))
719 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
720
7aa13860
PK
721(for bitop (bit_and bit_ior)
722 cmp (eq ne)
a93952d2
JJ
723 /* PR35691: Transform
724 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
725 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
7aa13860
PK
726 (simplify
727 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
728 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
a93952d2
JJ
729 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
730 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
731 (cmp (bit_ior @0 (convert @1)) @2)))
732 /* Transform:
733 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
734 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
735 (simplify
736 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
737 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
738 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
739 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
740 (cmp (bit_and @0 (convert @1)) @2))))
7aa13860 741
10158317
RB
742/* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
743(simplify
a9658b11 744 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
10158317
RB
745 (minus (bit_xor @0 @1) @1))
746(simplify
747 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
8e6cdc90 748 (if (~wi::to_wide (@2) == wi::to_wide (@1))
10158317
RB
749 (minus (bit_xor @0 @1) @1)))
750
751/* Fold (A & B) - (A & ~B) into B - (A ^ B). */
752(simplify
a8e9f9a3 753 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
10158317
RB
754 (minus @1 (bit_xor @0 @1)))
755
42bd89ce
MG
756/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
757(for op (bit_ior bit_xor plus)
758 (simplify
759 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
760 (bit_xor @0 @1))
761 (simplify
762 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
8e6cdc90 763 (if (~wi::to_wide (@2) == wi::to_wide (@1))
42bd89ce 764 (bit_xor @0 @1))))
2066ef6a
PK
765
766/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
767(simplify
768 (bit_ior:c (bit_xor:c @0 @1) @0)
769 (bit_ior @0 @1))
770
e268a77b
MG
771/* (a & ~b) | (a ^ b) --> a ^ b */
772(simplify
773 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
774 @2)
775
776/* (a & ~b) ^ ~a --> ~(a & b) */
777(simplify
778 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
779 (bit_not (bit_and @0 @1)))
780
781/* (a | b) & ~(a ^ b) --> a & b */
782(simplify
783 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
784 (bit_and @0 @1))
785
786/* a | ~(a ^ b) --> a | ~b */
787(simplify
788 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
789 (bit_ior @0 (bit_not @1)))
790
791/* (a | b) | (a &^ b) --> a | b */
792(for op (bit_and bit_xor)
793 (simplify
794 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
795 @2))
796
797/* (a & b) | ~(a ^ b) --> ~(a ^ b) */
798(simplify
799 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
800 @2)
801
802/* ~(~a & b) --> a | ~b */
803(simplify
804 (bit_not (bit_and:cs (bit_not @0) @1))
805 (bit_ior @0 (bit_not @1)))
806
fd8303a5
MC
807/* ~(~a | b) --> a & ~b */
808(simplify
809 (bit_not (bit_ior:cs (bit_not @0) @1))
810 (bit_and @0 (bit_not @1)))
811
d982c5b7
MG
812/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
813#if GIMPLE
814(simplify
815 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
816 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8e6cdc90 817 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
d982c5b7
MG
818 (bit_xor @0 @1)))
819#endif
10158317 820
f2901002
JJ
821/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
822 ((A & N) + B) & M -> (A + B) & M
823 Similarly if (N & M) == 0,
824 ((A | N) + B) & M -> (A + B) & M
825 and for - instead of + (or unary - instead of +)
826 and/or ^ instead of |.
827 If B is constant and (B & M) == 0, fold into A & M. */
828(for op (plus minus)
829 (for bitop (bit_and bit_ior bit_xor)
830 (simplify
831 (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
832 (with
833 { tree pmop[2];
834 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
835 @3, @4, @1, ERROR_MARK, NULL_TREE,
836 NULL_TREE, pmop); }
837 (if (utype)
838 (convert (bit_and (op (convert:utype { pmop[0]; })
839 (convert:utype { pmop[1]; }))
840 (convert:utype @2))))))
841 (simplify
842 (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
843 (with
844 { tree pmop[2];
845 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
846 NULL_TREE, NULL_TREE, @1, bitop, @3,
847 @4, pmop); }
848 (if (utype)
849 (convert (bit_and (op (convert:utype { pmop[0]; })
850 (convert:utype { pmop[1]; }))
851 (convert:utype @2)))))))
852 (simplify
853 (bit_and (op:s @0 @1) INTEGER_CST@2)
854 (with
855 { tree pmop[2];
856 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
857 NULL_TREE, NULL_TREE, @1, ERROR_MARK,
858 NULL_TREE, NULL_TREE, pmop); }
859 (if (utype)
860 (convert (bit_and (op (convert:utype { pmop[0]; })
861 (convert:utype { pmop[1]; }))
862 (convert:utype @2)))))))
863(for bitop (bit_and bit_ior bit_xor)
864 (simplify
865 (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
866 (with
867 { tree pmop[2];
868 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
869 bitop, @2, @3, NULL_TREE, ERROR_MARK,
870 NULL_TREE, NULL_TREE, pmop); }
871 (if (utype)
872 (convert (bit_and (negate (convert:utype { pmop[0]; }))
873 (convert:utype @1)))))))
874
bc4315fb
MG
875/* X % Y is smaller than Y. */
876(for cmp (lt ge)
877 (simplify
878 (cmp (trunc_mod @0 @1) @1)
879 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
880 { constant_boolean_node (cmp == LT_EXPR, type); })))
881(for cmp (gt le)
882 (simplify
883 (cmp @1 (trunc_mod @0 @1))
884 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
885 { constant_boolean_node (cmp == GT_EXPR, type); })))
886
e0ee10ed
RB
887/* x | ~0 -> ~0 */
888(simplify
ca0b7ece
RB
889 (bit_ior @0 integer_all_onesp@1)
890 @1)
891
892/* x | 0 -> x */
893(simplify
894 (bit_ior @0 integer_zerop)
895 @0)
e0ee10ed
RB
896
897/* x & 0 -> 0 */
898(simplify
ca0b7ece
RB
899 (bit_and @0 integer_zerop@1)
900 @1)
e0ee10ed 901
a4398a30 902/* ~x | x -> -1 */
8b5ee871
MG
903/* ~x ^ x -> -1 */
904/* ~x + x -> -1 */
905(for op (bit_ior bit_xor plus)
906 (simplify
907 (op:c (convert? @0) (convert? (bit_not @0)))
908 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
a4398a30 909
e0ee10ed
RB
910/* x ^ x -> 0 */
911(simplify
912 (bit_xor @0 @0)
913 { build_zero_cst (type); })
914
36a60e48
RB
915/* Canonicalize X ^ ~0 to ~X. */
916(simplify
917 (bit_xor @0 integer_all_onesp@1)
918 (bit_not @0))
919
920/* x & ~0 -> x */
921(simplify
922 (bit_and @0 integer_all_onesp)
923 (non_lvalue @0))
924
925/* x & x -> x, x | x -> x */
926(for bitop (bit_and bit_ior)
927 (simplify
928 (bitop @0 @0)
929 (non_lvalue @0)))
930
c7986356
MG
931/* x & C -> x if we know that x & ~C == 0. */
932#if GIMPLE
933(simplify
934 (bit_and SSA_NAME@0 INTEGER_CST@1)
935 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8e6cdc90 936 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
c7986356
MG
937 @0))
938#endif
939
0f770b01
RV
940/* x + (x & 1) -> (x + 1) & ~1 */
941(simplify
44fc0a51
RB
942 (plus:c @0 (bit_and:s @0 integer_onep@1))
943 (bit_and (plus @0 @1) (bit_not @1)))
0f770b01
RV
944
945/* x & ~(x & y) -> x & ~y */
946/* x | ~(x | y) -> x | ~y */
947(for bitop (bit_and bit_ior)
af563d4b 948 (simplify
44fc0a51
RB
949 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
950 (bitop @0 (bit_not @1))))
af563d4b 951
03cc70b5
MC
952/* (~x & y) | ~(x | y) -> ~x */
953(simplify
954 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
955 @2)
956
957/* (x | y) ^ (x | ~y) -> ~x */
958(simplify
959 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
960 (bit_not @0))
961
962/* (x & y) | ~(x | y) -> ~(x ^ y) */
963(simplify
964 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
965 (bit_not (bit_xor @0 @1)))
966
967/* (~x | y) ^ (x ^ y) -> x | ~y */
968(simplify
969 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
970 (bit_ior @0 (bit_not @1)))
971
972/* (x ^ y) | ~(x | y) -> ~(x & y) */
973(simplify
974 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
975 (bit_not (bit_and @0 @1)))
976
af563d4b
MG
977/* (x | y) & ~x -> y & ~x */
978/* (x & y) | ~x -> y | ~x */
979(for bitop (bit_and bit_ior)
980 rbitop (bit_ior bit_and)
981 (simplify
982 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
983 (bitop @1 @2)))
0f770b01 984
f13c4673
MP
985/* (x & y) ^ (x | y) -> x ^ y */
986(simplify
2d6f2dce
MP
987 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
988 (bit_xor @0 @1))
f13c4673 989
9ea65ca6
MP
990/* (x ^ y) ^ (x | y) -> x & y */
991(simplify
992 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
993 (bit_and @0 @1))
994
995/* (x & y) + (x ^ y) -> x | y */
996/* (x & y) | (x ^ y) -> x | y */
997/* (x & y) ^ (x ^ y) -> x | y */
998(for op (plus bit_ior bit_xor)
999 (simplify
1000 (op:c (bit_and @0 @1) (bit_xor @0 @1))
1001 (bit_ior @0 @1)))
1002
1003/* (x & y) + (x | y) -> x + y */
1004(simplify
1005 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
1006 (plus @0 @1))
1007
9737efaf
MP
1008/* (x + y) - (x | y) -> x & y */
1009(simplify
1010 (minus (plus @0 @1) (bit_ior @0 @1))
1011 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1012 && !TYPE_SATURATING (type))
1013 (bit_and @0 @1)))
1014
1015/* (x + y) - (x & y) -> x | y */
1016(simplify
1017 (minus (plus @0 @1) (bit_and @0 @1))
1018 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1019 && !TYPE_SATURATING (type))
1020 (bit_ior @0 @1)))
1021
9ea65ca6
MP
1022/* (x | y) - (x ^ y) -> x & y */
1023(simplify
1024 (minus (bit_ior @0 @1) (bit_xor @0 @1))
1025 (bit_and @0 @1))
1026
1027/* (x | y) - (x & y) -> x ^ y */
1028(simplify
1029 (minus (bit_ior @0 @1) (bit_and @0 @1))
1030 (bit_xor @0 @1))
1031
66cc6273
MP
1032/* (x | y) & ~(x & y) -> x ^ y */
1033(simplify
1034 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
1035 (bit_xor @0 @1))
1036
1037/* (x | y) & (~x ^ y) -> x & y */
1038(simplify
1039 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
1040 (bit_and @0 @1))
1041
fd8303a5
MC
1042/* (~x | y) & (x | ~y) -> ~(x ^ y) */
1043(simplify
1044 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
1045 (bit_not (bit_xor @0 @1)))
1046
1047/* (~x | y) ^ (x | ~y) -> x ^ y */
1048(simplify
1049 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
1050 (bit_xor @0 @1))
1051
5b00d921
RB
1052/* ~x & ~y -> ~(x | y)
1053 ~x | ~y -> ~(x & y) */
1054(for op (bit_and bit_ior)
1055 rop (bit_ior bit_and)
1056 (simplify
1057 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
ece46666
MG
1058 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1059 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
5b00d921
RB
1060 (bit_not (rop (convert @0) (convert @1))))))
1061
14ea9f92 1062/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
5b00d921
RB
1063 with a constant, and the two constants have no bits in common,
1064 we should treat this as a BIT_IOR_EXPR since this may produce more
1065 simplifications. */
14ea9f92
RB
1066(for op (bit_xor plus)
1067 (simplify
1068 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
1069 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
1070 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1071 && tree_nop_conversion_p (type, TREE_TYPE (@2))
8e6cdc90 1072 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
14ea9f92 1073 (bit_ior (convert @4) (convert @5)))))
5b00d921
RB
1074
1075/* (X | Y) ^ X -> Y & ~ X*/
1076(simplify
2eef1fc1 1077 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
5b00d921
RB
1078 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1079 (convert (bit_and @1 (bit_not @0)))))
1080
1081/* Convert ~X ^ ~Y to X ^ Y. */
1082(simplify
1083 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
ece46666
MG
1084 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1085 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
5b00d921
RB
1086 (bit_xor (convert @0) (convert @1))))
1087
1088/* Convert ~X ^ C to X ^ ~C. */
1089(simplify
1090 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
c8ba6498
EB
1091 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1092 (bit_xor (convert @0) (bit_not @1))))
5b00d921 1093
e39dab2c
MG
1094/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
1095(for opo (bit_and bit_xor)
1096 opi (bit_xor bit_and)
1097 (simplify
de5b5228 1098 (opo:c (opi:cs @0 @1) @1)
e39dab2c 1099 (bit_and (bit_not @0) @1)))
97e77391 1100
14ea9f92
RB
1101/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
1102 operands are another bit-wise operation with a common input. If so,
1103 distribute the bit operations to save an operation and possibly two if
1104 constants are involved. For example, convert
1105 (A | B) & (A | C) into A | (B & C)
1106 Further simplification will occur if B and C are constants. */
e07ab2fe
MG
1107(for op (bit_and bit_ior bit_xor)
1108 rop (bit_ior bit_and bit_and)
14ea9f92 1109 (simplify
2eef1fc1 1110 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
e07ab2fe
MG
1111 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1112 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
14ea9f92
RB
1113 (rop (convert @0) (op (convert @1) (convert @2))))))
1114
e39dab2c
MG
1115/* Some simple reassociation for bit operations, also handled in reassoc. */
1116/* (X & Y) & Y -> X & Y
1117 (X | Y) | Y -> X | Y */
1118(for op (bit_and bit_ior)
1119 (simplify
2eef1fc1 1120 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
e39dab2c
MG
1121 @2))
1122/* (X ^ Y) ^ Y -> X */
1123(simplify
2eef1fc1 1124 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
ece46666 1125 (convert @0))
e39dab2c
MG
1126/* (X & Y) & (X & Z) -> (X & Y) & Z
1127 (X | Y) | (X | Z) -> (X | Y) | Z */
1128(for op (bit_and bit_ior)
1129 (simplify
6c35e5b0 1130 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
e39dab2c
MG
1131 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1132 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1133 (if (single_use (@5) && single_use (@6))
1134 (op @3 (convert @2))
1135 (if (single_use (@3) && single_use (@4))
1136 (op (convert @1) @5))))))
1137/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
1138(simplify
1139 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
1140 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1141 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
d78789f5 1142 (bit_xor (convert @1) (convert @2))))
5b00d921 1143
64f7ea7c
KV
1144/* Convert abs (abs (X)) into abs (X).
1145 also absu (absu (X)) into absu (X). */
b14a9c57
RB
1146(simplify
1147 (abs (abs@1 @0))
1148 @1)
64f7ea7c
KV
1149
1150(simplify
1151 (absu (convert@2 (absu@1 @0)))
1152 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
1153 @1))
1154
1155/* Convert abs[u] (-X) -> abs[u] (X). */
f3582e54
RB
1156(simplify
1157 (abs (negate @0))
1158 (abs @0))
64f7ea7c
KV
1159
1160(simplify
1161 (absu (negate @0))
1162 (absu @0))
1163
1164/* Convert abs[u] (X) where X is nonnegative -> (X). */
f3582e54
RB
1165(simplify
1166 (abs tree_expr_nonnegative_p@0)
1167 @0)
1168
64f7ea7c
KV
1169(simplify
1170 (absu tree_expr_nonnegative_p@0)
1171 (convert @0))
1172
55cf3946
RB
1173/* A few cases of fold-const.c negate_expr_p predicate. */
1174(match negate_expr_p
1175 INTEGER_CST
b14a9c57 1176 (if ((INTEGRAL_TYPE_P (type)
56a6d474 1177 && TYPE_UNSIGNED (type))
b14a9c57 1178 || (!TYPE_OVERFLOW_SANITIZED (type)
55cf3946
RB
1179 && may_negate_without_overflow_p (t)))))
1180(match negate_expr_p
1181 FIXED_CST)
1182(match negate_expr_p
1183 (negate @0)
1184 (if (!TYPE_OVERFLOW_SANITIZED (type))))
1185(match negate_expr_p
1186 REAL_CST
1187 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
1188/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
1189 ways. */
1190(match negate_expr_p
1191 VECTOR_CST
1192 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
81bd903a
MG
1193(match negate_expr_p
1194 (minus @0 @1)
1195 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
1196 || (FLOAT_TYPE_P (type)
1197 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1198 && !HONOR_SIGNED_ZEROS (type)))))
0a8f32b8
RB
1199
1200/* (-A) * (-B) -> A * B */
1201(simplify
1202 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
1203 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1204 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1205 (mult (convert @0) (convert (negate @1)))))
03cc70b5 1206
55cf3946 1207/* -(A + B) -> (-B) - A. */
b14a9c57 1208(simplify
55cf3946
RB
1209 (negate (plus:c @0 negate_expr_p@1))
1210 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
1211 && !HONOR_SIGNED_ZEROS (element_mode (type)))
1212 (minus (negate @1) @0)))
1213
81bd903a
MG
1214/* -(A - B) -> B - A. */
1215(simplify
1216 (negate (minus @0 @1))
1217 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
1218 || (FLOAT_TYPE_P (type)
1219 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1220 && !HONOR_SIGNED_ZEROS (type)))
1221 (minus @1 @0)))
1af4ebf5
MG
1222(simplify
1223 (negate (pointer_diff @0 @1))
1224 (if (TYPE_OVERFLOW_UNDEFINED (type))
1225 (pointer_diff @1 @0)))
81bd903a 1226
55cf3946 1227/* A - B -> A + (-B) if B is easily negatable. */
b14a9c57 1228(simplify
55cf3946 1229 (minus @0 negate_expr_p@1)
e4e96a4f
KT
1230 (if (!FIXED_POINT_TYPE_P (type))
1231 (plus @0 (negate @1))))
d4573ffe 1232
5609420f
RB
1233/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
1234 when profitable.
1235 For bitwise binary operations apply operand conversions to the
1236 binary operation result instead of to the operands. This allows
1237 to combine successive conversions and bitwise binary operations.
1238 We combine the above two cases by using a conditional convert. */
1239(for bitop (bit_and bit_ior bit_xor)
1240 (simplify
1241 (bitop (convert @0) (convert? @1))
1242 (if (((TREE_CODE (@1) == INTEGER_CST
1243 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
ad6f996c 1244 && int_fits_type_p (@1, TREE_TYPE (@0)))
aea417d7 1245 || types_match (@0, @1))
ad6f996c
RB
1246 /* ??? This transform conflicts with fold-const.c doing
1247 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
1248 constants (if x has signed type, the sign bit cannot be set
1249 in c). This folds extension into the BIT_AND_EXPR.
1250 Restrict it to GIMPLE to avoid endless recursions. */
1251 && (bitop != BIT_AND_EXPR || GIMPLE)
5609420f
RB
1252 && (/* That's a good idea if the conversion widens the operand, thus
1253 after hoisting the conversion the operation will be narrower. */
1254 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
1255 /* It's also a good idea if the conversion is to a non-integer
1256 mode. */
1257 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
1258 /* Or if the precision of TO is not the same as the precision
1259 of its mode. */
2be65d9e 1260 || !type_has_mode_precision_p (type)))
5609420f
RB
1261 (convert (bitop @0 (convert @1))))))
1262
b14a9c57
RB
1263(for bitop (bit_and bit_ior)
1264 rbitop (bit_ior bit_and)
1265 /* (x | y) & x -> x */
1266 /* (x & y) | x -> x */
1267 (simplify
1268 (bitop:c (rbitop:c @0 @1) @0)
1269 @0)
1270 /* (~x | y) & x -> x & y */
1271 /* (~x & y) | x -> x | y */
1272 (simplify
1273 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1274 (bitop @0 @1)))
1275
5609420f
RB
1276/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1277(simplify
1278 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1279 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
1280
1281/* Combine successive equal operations with constants. */
1282(for bitop (bit_and bit_ior bit_xor)
1283 (simplify
1284 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
fba05d9e
RS
1285 (if (!CONSTANT_CLASS_P (@0))
1286 /* This is the canonical form regardless of whether (bitop @1 @2) can be
1287 folded to a constant. */
1288 (bitop @0 (bitop @1 @2))
1289 /* In this case we have three constants and (bitop @0 @1) doesn't fold
1290 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
1291 the values involved are such that the operation can't be decided at
1292 compile time. Try folding one of @0 or @1 with @2 to see whether
1293 that combination can be decided at compile time.
1294
1295 Keep the existing form if both folds fail, to avoid endless
1296 oscillation. */
1297 (with { tree cst1 = const_binop (bitop, type, @0, @2); }
1298 (if (cst1)
1299 (bitop @1 { cst1; })
1300 (with { tree cst2 = const_binop (bitop, type, @1, @2); }
1301 (if (cst2)
1302 (bitop @0 { cst2; }))))))))
5609420f
RB
1303
1304/* Try simple folding for X op !X, and X op X with the help
1305 of the truth_valued_p and logical_inverted_value predicates. */
1306(match truth_valued_p
1307 @0
1308 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
f84e7fd6 1309(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
5609420f
RB
1310 (match truth_valued_p
1311 (op @0 @1)))
1312(match truth_valued_p
1313 (truth_not @0))
1314
0a8f32b8
RB
1315(match (logical_inverted_value @0)
1316 (truth_not @0))
5609420f
RB
1317(match (logical_inverted_value @0)
1318 (bit_not truth_valued_p@0))
1319(match (logical_inverted_value @0)
09240451 1320 (eq @0 integer_zerop))
5609420f 1321(match (logical_inverted_value @0)
09240451 1322 (ne truth_valued_p@0 integer_truep))
5609420f 1323(match (logical_inverted_value @0)
09240451 1324 (bit_xor truth_valued_p@0 integer_truep))
5609420f
RB
1325
1326/* X & !X -> 0. */
1327(simplify
1328 (bit_and:c @0 (logical_inverted_value @0))
1329 { build_zero_cst (type); })
1330/* X | !X and X ^ !X -> 1, , if X is truth-valued. */
1331(for op (bit_ior bit_xor)
1332 (simplify
1333 (op:c truth_valued_p@0 (logical_inverted_value @0))
f84e7fd6 1334 { constant_boolean_node (true, type); }))
59c20dc7
RB
1335/* X ==/!= !X is false/true. */
1336(for op (eq ne)
1337 (simplify
1338 (op:c truth_valued_p@0 (logical_inverted_value @0))
1339 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
5609420f 1340
5609420f
RB
1341/* ~~x -> x */
1342(simplify
1343 (bit_not (bit_not @0))
1344 @0)
1345
b14a9c57
RB
1346/* Convert ~ (-A) to A - 1. */
1347(simplify
1348 (bit_not (convert? (negate @0)))
ece46666
MG
1349 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1350 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
8b5ee871 1351 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
b14a9c57 1352
81bd903a
MG
1353/* Convert - (~A) to A + 1. */
1354(simplify
1355 (negate (nop_convert (bit_not @0)))
1356 (plus (view_convert @0) { build_each_one_cst (type); }))
1357
b14a9c57
RB
1358/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1359(simplify
8b5ee871 1360 (bit_not (convert? (minus @0 integer_each_onep)))
ece46666
MG
1361 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1362 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
1363 (convert (negate @0))))
1364(simplify
1365 (bit_not (convert? (plus @0 integer_all_onesp)))
ece46666
MG
1366 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1367 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
1368 (convert (negate @0))))
1369
1370/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
1371(simplify
1372 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1373 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1374 (convert (bit_xor @0 (bit_not @1)))))
1375(simplify
1376 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1377 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1378 (convert (bit_xor @0 @1))))
1379
e268a77b
MG
1380/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
1381(simplify
1382 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
1383 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1384 (bit_not (bit_xor (view_convert @0) @1))))
1385
f52baa7b
MP
1386/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1387(simplify
44fc0a51
RB
1388 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1389 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
f52baa7b 1390
f7b7b0aa
MP
1391/* Fold A - (A & B) into ~B & A. */
1392(simplify
2eef1fc1 1393 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
f7b7b0aa
MP
1394 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1395 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1396 (convert (bit_and (bit_not @1) @0))))
5609420f 1397
2071f8f9
N
1398/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
1399(for cmp (gt lt ge le)
1400(simplify
1401 (mult (convert (cmp @0 @1)) @2)
1402 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))
1403
e36c1cfe
N
1404/* For integral types with undefined overflow and C != 0 fold
1405 x * C EQ/NE y * C into x EQ/NE y. */
1406(for cmp (eq ne)
1407 (simplify
1408 (cmp (mult:c @0 @1) (mult:c @2 @1))
1409 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1410 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1411 && tree_expr_nonzero_p (@1))
1412 (cmp @0 @2))))
1413
42bd89ce
MG
1414/* For integral types with wrapping overflow and C odd fold
1415 x * C EQ/NE y * C into x EQ/NE y. */
1416(for cmp (eq ne)
1417 (simplify
1418 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1419 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1420 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1421 && (TREE_INT_CST_LOW (@1) & 1) != 0)
1422 (cmp @0 @2))))
1423
e36c1cfe
N
1424/* For integral types with undefined overflow and C != 0 fold
1425 x * C RELOP y * C into:
84ff66b8 1426
e36c1cfe
N
1427 x RELOP y for nonnegative C
1428 y RELOP x for negative C */
1429(for cmp (lt gt le ge)
1430 (simplify
1431 (cmp (mult:c @0 @1) (mult:c @2 @1))
1432 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1433 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1434 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1435 (cmp @0 @2)
1436 (if (TREE_CODE (@1) == INTEGER_CST
8e6cdc90 1437 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
e36c1cfe 1438 (cmp @2 @0))))))
84ff66b8 1439
564e405c
JJ
1440/* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
1441(for cmp (le gt)
1442 icmp (gt le)
1443 (simplify
1444 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1445 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1446 && TYPE_UNSIGNED (TREE_TYPE (@0))
1447 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
8e6cdc90
RS
1448 && (wi::to_wide (@2)
1449 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
564e405c
JJ
1450 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1451 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
1452
a8492d5e
MG
1453/* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1454(for cmp (simple_comparison)
1455 (simplify
1456 (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
8e6cdc90 1457 (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
a8492d5e
MG
1458 (cmp @0 @1))))
1459
8d1628eb
JJ
1460/* X / C1 op C2 into a simple range test. */
1461(for cmp (simple_comparison)
1462 (simplify
1463 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1464 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1465 && integer_nonzerop (@1)
1466 && !TREE_OVERFLOW (@1)
1467 && !TREE_OVERFLOW (@2))
1468 (with { tree lo, hi; bool neg_overflow;
1469 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1470 &neg_overflow); }
1471 (switch
1472 (if (code == LT_EXPR || code == GE_EXPR)
1473 (if (TREE_OVERFLOW (lo))
1474 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1475 (if (code == LT_EXPR)
1476 (lt @0 { lo; })
1477 (ge @0 { lo; }))))
1478 (if (code == LE_EXPR || code == GT_EXPR)
1479 (if (TREE_OVERFLOW (hi))
1480 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1481 (if (code == LE_EXPR)
1482 (le @0 { hi; })
1483 (gt @0 { hi; }))))
1484 (if (!lo && !hi)
1485 { build_int_cst (type, code == NE_EXPR); })
1486 (if (code == EQ_EXPR && !hi)
1487 (ge @0 { lo; }))
1488 (if (code == EQ_EXPR && !lo)
1489 (le @0 { hi; }))
1490 (if (code == NE_EXPR && !hi)
1491 (lt @0 { lo; }))
1492 (if (code == NE_EXPR && !lo)
1493 (gt @0 { hi; }))
1494 (if (GENERIC)
1495 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1496 lo, hi); })
1497 (with
1498 {
1499 tree etype = range_check_type (TREE_TYPE (@0));
1500 if (etype)
1501 {
1502 if (! TYPE_UNSIGNED (etype))
1503 etype = unsigned_type_for (etype);
1504 hi = fold_convert (etype, hi);
1505 lo = fold_convert (etype, lo);
1506 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1507 }
1508 }
1509 (if (etype && hi && !TREE_OVERFLOW (hi))
1510 (if (code == EQ_EXPR)
1511 (le (minus (convert:etype @0) { lo; }) { hi; })
1512 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
1513
d35256b6
MG
1514/* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1515(for op (lt le ge gt)
1516 (simplify
1517 (op (plus:c @0 @2) (plus:c @1 @2))
1518 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1519 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1520 (op @0 @1))))
1521/* For equality and subtraction, this is also true with wrapping overflow. */
1522(for op (eq ne minus)
1523 (simplify
1524 (op (plus:c @0 @2) (plus:c @1 @2))
1525 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1526 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1527 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1528 (op @0 @1))))
1529
1530/* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1531(for op (lt le ge gt)
1532 (simplify
1533 (op (minus @0 @2) (minus @1 @2))
1534 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1535 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1536 (op @0 @1))))
1537/* For equality and subtraction, this is also true with wrapping overflow. */
1538(for op (eq ne minus)
1539 (simplify
1540 (op (minus @0 @2) (minus @1 @2))
1541 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1542 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1543 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1544 (op @0 @1))))
1af4ebf5
MG
1545/* And for pointers... */
1546(for op (simple_comparison)
1547 (simplify
1548 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1549 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1550 (op @0 @1))))
1551(simplify
1552 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1553 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1554 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1555 (pointer_diff @0 @1)))
d35256b6
MG
1556
1557/* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1558(for op (lt le ge gt)
1559 (simplify
1560 (op (minus @2 @0) (minus @2 @1))
1561 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1562 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1563 (op @1 @0))))
1564/* For equality and subtraction, this is also true with wrapping overflow. */
1565(for op (eq ne minus)
1566 (simplify
1567 (op (minus @2 @0) (minus @2 @1))
1568 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1569 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1570 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1571 (op @1 @0))))
1af4ebf5
MG
1572/* And for pointers... */
1573(for op (simple_comparison)
1574 (simplify
1575 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1576 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1577 (op @1 @0))))
1578(simplify
1579 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1580 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1581 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1582 (pointer_diff @1 @0)))
d35256b6 1583
6358a676
MG
1584/* X + Y < Y is the same as X < 0 when there is no overflow. */
1585(for op (lt le gt ge)
1586 (simplify
1587 (op:c (plus:c@2 @0 @1) @1)
1588 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1589 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
cbd42900 1590 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
6358a676
MG
1591 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1592 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1593/* For equality, this is also true with wrapping overflow. */
1594(for op (eq ne)
1595 (simplify
1596 (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1597 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1598 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1599 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1600 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1601 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1602 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1603 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1604 (simplify
1605 (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1606 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1607 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1608 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1609 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1610
1611/* X - Y < X is the same as Y > 0 when there is no overflow.
1612 For equality, this is also true with wrapping overflow. */
1613(for op (simple_comparison)
1614 (simplify
1615 (op:c @0 (minus@2 @0 @1))
1616 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1617 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1618 || ((op == EQ_EXPR || op == NE_EXPR)
1619 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1620 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1621 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1622
1d6fadee 1623/* Transform:
b8d85005
JJ
1624 (X / Y) == 0 -> X < Y if X, Y are unsigned.
1625 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */
1d6fadee
PK
1626(for cmp (eq ne)
1627 ocmp (lt ge)
1628 (simplify
1629 (cmp (trunc_div @0 @1) integer_zerop)
1630 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
b8d85005
JJ
1631 /* Complex ==/!= is allowed, but not </>=. */
1632 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
1d6fadee
PK
1633 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
1634 (ocmp @0 @1))))
1635
8b656ca7
MG
1636/* X == C - X can never be true if C is odd. */
1637(for cmp (eq ne)
1638 (simplify
1639 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1640 (if (TREE_INT_CST_LOW (@1) & 1)
1641 { constant_boolean_node (cmp == NE_EXPR, type); })))
1642
10bc8017
MG
1643/* Arguments on which one can call get_nonzero_bits to get the bits
1644 possibly set. */
1645(match with_possible_nonzero_bits
1646 INTEGER_CST@0)
1647(match with_possible_nonzero_bits
1648 SSA_NAME@0
1649 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1650/* Slightly extended version, do not make it recursive to keep it cheap. */
1651(match (with_possible_nonzero_bits2 @0)
1652 with_possible_nonzero_bits@0)
1653(match (with_possible_nonzero_bits2 @0)
1654 (bit_and:c with_possible_nonzero_bits@0 @2))
1655
1656/* Same for bits that are known to be set, but we do not have
1657 an equivalent to get_nonzero_bits yet. */
1658(match (with_certain_nonzero_bits2 @0)
1659 INTEGER_CST@0)
1660(match (with_certain_nonzero_bits2 @0)
1661 (bit_ior @1 INTEGER_CST@0))
1662
1663/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1664(for cmp (eq ne)
1665 (simplify
1666 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
8e6cdc90 1667 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
10bc8017
MG
1668 { constant_boolean_node (cmp == NE_EXPR, type); })))
1669
84ff66b8
AV
1670/* ((X inner_op C0) outer_op C1)
1671 With X being a tree where value_range has reasoned certain bits to always be
1672 zero throughout its computed value range,
1673 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
1674 where zero_mask has 1's for all bits that are sure to be 0 in
1675 and 0's otherwise.
1676 if (inner_op == '^') C0 &= ~C1;
1677 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)
1678 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)
1679*/
1680(for inner_op (bit_ior bit_xor)
1681 outer_op (bit_xor bit_ior)
1682(simplify
1683 (outer_op
1684 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1685 (with
1686 {
1687 bool fail = false;
1688 wide_int zero_mask_not;
1689 wide_int C0;
1690 wide_int cst_emit;
1691
1692 if (TREE_CODE (@2) == SSA_NAME)
1693 zero_mask_not = get_nonzero_bits (@2);
1694 else
1695 fail = true;
1696
1697 if (inner_op == BIT_XOR_EXPR)
1698 {
8e6cdc90
RS
1699 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1700 cst_emit = C0 | wi::to_wide (@1);
84ff66b8
AV
1701 }
1702 else
1703 {
8e6cdc90
RS
1704 C0 = wi::to_wide (@0);
1705 cst_emit = C0 ^ wi::to_wide (@1);
84ff66b8
AV
1706 }
1707 }
8e6cdc90 1708 (if (!fail && (C0 & zero_mask_not) == 0)
84ff66b8 1709 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
8e6cdc90 1710 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
84ff66b8
AV
1711 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
1712
a499aac5
RB
1713/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1714(simplify
44fc0a51
RB
1715 (pointer_plus (pointer_plus:s @0 @1) @3)
1716 (pointer_plus @0 (plus @1 @3)))
a499aac5
RB
1717
1718/* Pattern match
1719 tem1 = (long) ptr1;
1720 tem2 = (long) ptr2;
1721 tem3 = tem2 - tem1;
1722 tem4 = (unsigned long) tem3;
1723 tem5 = ptr1 + tem4;
1724 and produce
1725 tem5 = ptr2; */
1726(simplify
1727 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1728 /* Conditionally look through a sign-changing conversion. */
1729 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1730 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1731 || (GENERIC && type == TREE_TYPE (@1))))
1732 @1))
1af4ebf5
MG
1733(simplify
1734 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
1735 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
1736 (convert @1)))
a499aac5
RB
1737
1738/* Pattern match
1739 tem = (sizetype) ptr;
1740 tem = tem & algn;
1741 tem = -tem;
1742 ... = ptr p+ tem;
1743 and produce the simpler and easier to analyze with respect to alignment
1744 ... = ptr & ~algn; */
1745(simplify
1746 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
8e6cdc90 1747 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
a499aac5
RB
1748 (bit_and @0 { algn; })))
1749
99e943a2
RB
1750/* Try folding difference of addresses. */
1751(simplify
1752 (minus (convert ADDR_EXPR@0) (convert @1))
1753 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
f37fac2b 1754 (with { poly_int64 diff; }
99e943a2
RB
1755 (if (ptr_difference_const (@0, @1, &diff))
1756 { build_int_cst_type (type, diff); }))))
1757(simplify
1758 (minus (convert @0) (convert ADDR_EXPR@1))
1759 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
f37fac2b 1760 (with { poly_int64 diff; }
99e943a2
RB
1761 (if (ptr_difference_const (@0, @1, &diff))
1762 { build_int_cst_type (type, diff); }))))
1af4ebf5 1763(simplify
67fccea4 1764 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1af4ebf5
MG
1765 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1766 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
f37fac2b 1767 (with { poly_int64 diff; }
1af4ebf5
MG
1768 (if (ptr_difference_const (@0, @1, &diff))
1769 { build_int_cst_type (type, diff); }))))
1770(simplify
67fccea4 1771 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1af4ebf5
MG
1772 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1773 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
f37fac2b 1774 (with { poly_int64 diff; }
1af4ebf5
MG
1775 (if (ptr_difference_const (@0, @1, &diff))
1776 { build_int_cst_type (type, diff); }))))
99e943a2 1777
bab73f11
RB
1778/* If arg0 is derived from the address of an object or function, we may
1779 be able to fold this expression using the object or function's
1780 alignment. */
1781(simplify
1782 (bit_and (convert? @0) INTEGER_CST@1)
1783 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1784 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1785 (with
1786 {
1787 unsigned int align;
1788 unsigned HOST_WIDE_INT bitpos;
1789 get_pointer_alignment_1 (@0, &align, &bitpos);
1790 }
8e6cdc90
RS
1791 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1792 { wide_int_to_tree (type, (wi::to_wide (@1)
1793 & (bitpos / BITS_PER_UNIT))); }))))
99e943a2 1794
a499aac5 1795
cc7b5acf
RB
1796/* We can't reassociate at all for saturating types. */
1797(if (!TYPE_SATURATING (type))
1798
1799 /* Contract negates. */
1800 /* A + (-B) -> A - B */
1801 (simplify
248179b5
RB
1802 (plus:c @0 (convert? (negate @1)))
1803 /* Apply STRIP_NOPS on the negate. */
1804 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1805 && !TYPE_OVERFLOW_SANITIZED (type))
248179b5
RB
1806 (with
1807 {
1808 tree t1 = type;
1809 if (INTEGRAL_TYPE_P (type)
1810 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1811 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1812 }
1813 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
cc7b5acf
RB
1814 /* A - (-B) -> A + B */
1815 (simplify
248179b5
RB
1816 (minus @0 (convert? (negate @1)))
1817 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1818 && !TYPE_OVERFLOW_SANITIZED (type))
248179b5
RB
1819 (with
1820 {
1821 tree t1 = type;
1822 if (INTEGRAL_TYPE_P (type)
1823 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1824 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1825 }
1826 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
63626547
MG
1827 /* -(T)(-A) -> (T)A
1828 Sign-extension is ok except for INT_MIN, which thankfully cannot
1829 happen without overflow. */
1830 (simplify
1831 (negate (convert (negate @1)))
1832 (if (INTEGRAL_TYPE_P (type)
1833 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
1834 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
1835 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1836 && !TYPE_OVERFLOW_SANITIZED (type)
1837 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
a0f12cf8 1838 (convert @1)))
63626547
MG
1839 (simplify
1840 (negate (convert negate_expr_p@1))
1841 (if (SCALAR_FLOAT_TYPE_P (type)
1842 && ((DECIMAL_FLOAT_TYPE_P (type)
1843 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
1844 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
1845 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
1846 (convert (negate @1))))
1847 (simplify
1848 (negate (nop_convert (negate @1)))
1849 (if (!TYPE_OVERFLOW_SANITIZED (type)
1850 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1851 (view_convert @1)))
cc7b5acf 1852
7318e44f
RB
1853 /* We can't reassociate floating-point unless -fassociative-math
1854 or fixed-point plus or minus because of saturation to +-Inf. */
1855 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1856 && !FIXED_POINT_TYPE_P (type))
cc7b5acf
RB
1857
1858 /* Match patterns that allow contracting a plus-minus pair
1859 irrespective of overflow issues. */
1860 /* (A +- B) - A -> +- B */
1861 /* (A +- B) -+ B -> A */
1862 /* A - (A +- B) -> -+ B */
1863 /* A +- (B -+ A) -> +- B */
1864 (simplify
1865 (minus (plus:c @0 @1) @0)
1866 @1)
1867 (simplify
1868 (minus (minus @0 @1) @0)
1869 (negate @1))
1870 (simplify
1871 (plus:c (minus @0 @1) @1)
1872 @0)
1873 (simplify
1874 (minus @0 (plus:c @0 @1))
1875 (negate @1))
1876 (simplify
1877 (minus @0 (minus @0 @1))
1878 @1)
1e7df2e6
MG
1879 /* (A +- B) + (C - A) -> C +- B */
1880 /* (A + B) - (A - C) -> B + C */
1881 /* More cases are handled with comparisons. */
1882 (simplify
1883 (plus:c (plus:c @0 @1) (minus @2 @0))
1884 (plus @2 @1))
1885 (simplify
1886 (plus:c (minus @0 @1) (minus @2 @0))
1887 (minus @2 @1))
1af4ebf5
MG
1888 (simplify
1889 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
1890 (if (TYPE_OVERFLOW_UNDEFINED (type)
1891 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
1892 (pointer_diff @2 @1)))
1e7df2e6
MG
1893 (simplify
1894 (minus (plus:c @0 @1) (minus @0 @2))
1895 (plus @1 @2))
cc7b5acf 1896
ed73f46f
MG
1897 /* (A +- CST1) +- CST2 -> A + CST3
1898 Use view_convert because it is safe for vectors and equivalent for
1899 scalars. */
cc7b5acf
RB
1900 (for outer_op (plus minus)
1901 (for inner_op (plus minus)
ed73f46f 1902 neg_inner_op (minus plus)
cc7b5acf 1903 (simplify
ed73f46f
MG
1904 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1905 CONSTANT_CLASS_P@2)
1906 /* If one of the types wraps, use that one. */
1907 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
3eb1eecf
JJ
1908 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
1909 forever if something doesn't simplify into a constant. */
1910 (if (!CONSTANT_CLASS_P (@0))
1911 (if (outer_op == PLUS_EXPR)
1912 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1913 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
ed73f46f
MG
1914 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1915 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1916 (if (outer_op == PLUS_EXPR)
1917 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1918 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1919 /* If the constant operation overflows we cannot do the transform
1920 directly as we would introduce undefined overflow, for example
1921 with (a - 1) + INT_MIN. */
1922 (if (types_match (type, @0))
1923 (with { tree cst = const_binop (outer_op == inner_op
1924 ? PLUS_EXPR : MINUS_EXPR,
1925 type, @1, @2); }
1926 (if (cst && !TREE_OVERFLOW (cst))
1927 (inner_op @0 { cst; } )
1928 /* X+INT_MAX+1 is X-INT_MIN. */
1929 (if (INTEGRAL_TYPE_P (type) && cst
8e6cdc90
RS
1930 && wi::to_wide (cst) == wi::min_value (type))
1931 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
ed73f46f
MG
1932 /* Last resort, use some unsigned type. */
1933 (with { tree utype = unsigned_type_for (type); }
48fcd201
JJ
1934 (if (utype)
1935 (view_convert (inner_op
1936 (view_convert:utype @0)
1937 (view_convert:utype
1938 { drop_tree_overflow (cst); }))))))))))))))
cc7b5acf 1939
b302f2e0 1940 /* (CST1 - A) +- CST2 -> CST3 - A */
cc7b5acf
RB
1941 (for outer_op (plus minus)
1942 (simplify
1943 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
23f27839 1944 (with { tree cst = const_binop (outer_op, type, @1, @2); }
cc7b5acf
RB
1945 (if (cst && !TREE_OVERFLOW (cst))
1946 (minus { cst; } @0)))))
1947
b302f2e0
RB
1948 /* CST1 - (CST2 - A) -> CST3 + A */
1949 (simplify
1950 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1951 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1952 (if (cst && !TREE_OVERFLOW (cst))
1953 (plus { cst; } @0))))
1954
cc7b5acf
RB
1955 /* ~A + A -> -1 */
1956 (simplify
1957 (plus:c (bit_not @0) @0)
1958 (if (!TYPE_OVERFLOW_TRAPS (type))
1959 { build_all_ones_cst (type); }))
1960
1961 /* ~A + 1 -> -A */
1962 (simplify
e19740ae
RB
1963 (plus (convert? (bit_not @0)) integer_each_onep)
1964 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1965 (negate (convert @0))))
1966
1967 /* -A - 1 -> ~A */
1968 (simplify
1969 (minus (convert? (negate @0)) integer_each_onep)
1970 (if (!TYPE_OVERFLOW_TRAPS (type)
1971 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1972 (bit_not (convert @0))))
1973
1974 /* -1 - A -> ~A */
1975 (simplify
1976 (minus integer_all_onesp @0)
bc4315fb 1977 (bit_not @0))
cc7b5acf
RB
1978
1979 /* (T)(P + A) - (T)P -> (T) A */
d7f44d4d 1980 (simplify
a72610d4
JJ
1981 (minus (convert (plus:c @@0 @1))
1982 (convert? @0))
d7f44d4d
JJ
1983 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1984 /* For integer types, if A has a smaller type
1985 than T the result depends on the possible
1986 overflow in P + A.
1987 E.g. T=size_t, A=(unsigned)429497295, P>0.
1988 However, if an overflow in P + A would cause
1989 undefined behavior, we can assume that there
1990 is no overflow. */
a72610d4
JJ
1991 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1992 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
d7f44d4d
JJ
1993 (convert @1)))
1994 (simplify
1995 (minus (convert (pointer_plus @@0 @1))
1996 (convert @0))
1997 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1998 /* For pointer types, if the conversion of A to the
1999 final type requires a sign- or zero-extension,
2000 then we have to punt - it is not defined which
2001 one is correct. */
2002 || (POINTER_TYPE_P (TREE_TYPE (@0))
2003 && TREE_CODE (@1) == INTEGER_CST
2004 && tree_int_cst_sign_bit (@1) == 0))
2005 (convert @1)))
1af4ebf5
MG
2006 (simplify
2007 (pointer_diff (pointer_plus @@0 @1) @0)
2008 /* The second argument of pointer_plus must be interpreted as signed, and
2009 thus sign-extended if necessary. */
2010 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
8ae43881
JJ
2011 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2012 second arg is unsigned even when we need to consider it as signed,
2013 we don't want to diagnose overflow here. */
2014 (convert (view_convert:stype @1))))
a8fc2579
RB
2015
2016 /* (T)P - (T)(P + A) -> -(T) A */
d7f44d4d 2017 (simplify
a72610d4
JJ
2018 (minus (convert? @0)
2019 (convert (plus:c @@0 @1)))
d7f44d4d
JJ
2020 (if (INTEGRAL_TYPE_P (type)
2021 && TYPE_OVERFLOW_UNDEFINED (type)
2022 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2023 (with { tree utype = unsigned_type_for (type); }
2024 (convert (negate (convert:utype @1))))
a8fc2579
RB
2025 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2026 /* For integer types, if A has a smaller type
2027 than T the result depends on the possible
2028 overflow in P + A.
2029 E.g. T=size_t, A=(unsigned)429497295, P>0.
2030 However, if an overflow in P + A would cause
2031 undefined behavior, we can assume that there
2032 is no overflow. */
a72610d4
JJ
2033 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2034 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
d7f44d4d
JJ
2035 (negate (convert @1)))))
2036 (simplify
2037 (minus (convert @0)
2038 (convert (pointer_plus @@0 @1)))
2039 (if (INTEGRAL_TYPE_P (type)
2040 && TYPE_OVERFLOW_UNDEFINED (type)
2041 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2042 (with { tree utype = unsigned_type_for (type); }
2043 (convert (negate (convert:utype @1))))
2044 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
a8fc2579
RB
2045 /* For pointer types, if the conversion of A to the
2046 final type requires a sign- or zero-extension,
2047 then we have to punt - it is not defined which
2048 one is correct. */
2049 || (POINTER_TYPE_P (TREE_TYPE (@0))
2050 && TREE_CODE (@1) == INTEGER_CST
2051 && tree_int_cst_sign_bit (@1) == 0))
2052 (negate (convert @1)))))
1af4ebf5
MG
2053 (simplify
2054 (pointer_diff @0 (pointer_plus @@0 @1))
2055 /* The second argument of pointer_plus must be interpreted as signed, and
2056 thus sign-extended if necessary. */
2057 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
8ae43881
JJ
2058 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2059 second arg is unsigned even when we need to consider it as signed,
2060 we don't want to diagnose overflow here. */
2061 (negate (convert (view_convert:stype @1)))))
a8fc2579
RB
2062
2063 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
d7f44d4d 2064 (simplify
a72610d4 2065 (minus (convert (plus:c @@0 @1))
d7f44d4d
JJ
2066 (convert (plus:c @0 @2)))
2067 (if (INTEGRAL_TYPE_P (type)
2068 && TYPE_OVERFLOW_UNDEFINED (type)
a72610d4
JJ
2069 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2070 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
d7f44d4d
JJ
2071 (with { tree utype = unsigned_type_for (type); }
2072 (convert (minus (convert:utype @1) (convert:utype @2))))
a72610d4
JJ
2073 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2074 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2075 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2076 /* For integer types, if A has a smaller type
2077 than T the result depends on the possible
2078 overflow in P + A.
2079 E.g. T=size_t, A=(unsigned)429497295, P>0.
2080 However, if an overflow in P + A would cause
2081 undefined behavior, we can assume that there
2082 is no overflow. */
2083 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2084 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2085 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2086 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
d7f44d4d
JJ
2087 (minus (convert @1) (convert @2)))))
2088 (simplify
2089 (minus (convert (pointer_plus @@0 @1))
2090 (convert (pointer_plus @0 @2)))
2091 (if (INTEGRAL_TYPE_P (type)
2092 && TYPE_OVERFLOW_UNDEFINED (type)
2093 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2094 (with { tree utype = unsigned_type_for (type); }
2095 (convert (minus (convert:utype @1) (convert:utype @2))))
2096 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
a8fc2579
RB
2097 /* For pointer types, if the conversion of A to the
2098 final type requires a sign- or zero-extension,
2099 then we have to punt - it is not defined which
2100 one is correct. */
2101 || (POINTER_TYPE_P (TREE_TYPE (@0))
2102 && TREE_CODE (@1) == INTEGER_CST
2103 && tree_int_cst_sign_bit (@1) == 0
2104 && TREE_CODE (@2) == INTEGER_CST
2105 && tree_int_cst_sign_bit (@2) == 0))
d7f44d4d 2106 (minus (convert @1) (convert @2)))))
1af4ebf5
MG
2107 (simplify
2108 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2109 /* The second argument of pointer_plus must be interpreted as signed, and
2110 thus sign-extended if necessary. */
2111 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
8ae43881
JJ
2112 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2113 second arg is unsigned even when we need to consider it as signed,
2114 we don't want to diagnose overflow here. */
2115 (minus (convert (view_convert:stype @1))
2116 (convert (view_convert:stype @2)))))))
cc7b5acf 2117
5b55e6e3
RB
2118/* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2119 Modeled after fold_plusminus_mult_expr. */
2120(if (!TYPE_SATURATING (type)
2121 && (!FLOAT_TYPE_P (type) || flag_associative_math))
2122 (for plusminus (plus minus)
2123 (simplify
c1bbe5b3
RB
2124 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2125 (if ((!ANY_INTEGRAL_TYPE_P (type)
5b55e6e3
RB
2126 || TYPE_OVERFLOW_WRAPS (type)
2127 || (INTEGRAL_TYPE_P (type)
2128 && tree_expr_nonzero_p (@0)
2129 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
c1bbe5b3
RB
2130 /* If @1 +- @2 is constant require a hard single-use on either
2131 original operand (but not on both). */
2132 && (single_use (@3) || single_use (@4)))
2133 (mult (plusminus @1 @2) @0)))
2134 /* We cannot generate constant 1 for fract. */
2135 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2136 (simplify
2137 (plusminus @0 (mult:c@3 @0 @2))
2138 (if ((!ANY_INTEGRAL_TYPE_P (type)
2139 || TYPE_OVERFLOW_WRAPS (type)
2140 || (INTEGRAL_TYPE_P (type)
2141 && tree_expr_nonzero_p (@0)
2142 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2143 && single_use (@3))
5b55e6e3
RB
2144 (mult (plusminus { build_one_cst (type); } @2) @0)))
2145 (simplify
c1bbe5b3
RB
2146 (plusminus (mult:c@3 @0 @2) @0)
2147 (if ((!ANY_INTEGRAL_TYPE_P (type)
2148 || TYPE_OVERFLOW_WRAPS (type)
2149 || (INTEGRAL_TYPE_P (type)
2150 && tree_expr_nonzero_p (@0)
2151 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2152 && single_use (@3))
5b55e6e3 2153 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
cc7b5acf 2154
0122e8e5 2155/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
a7f24614 2156
c6cfa2bf 2157(for minmax (min max FMIN_ALL FMAX_ALL)
a7f24614
RB
2158 (simplify
2159 (minmax @0 @0)
2160 @0))
4a334cba
RS
2161/* min(max(x,y),y) -> y. */
2162(simplify
2163 (min:c (max:c @0 @1) @1)
2164 @1)
2165/* max(min(x,y),y) -> y. */
2166(simplify
2167 (max:c (min:c @0 @1) @1)
2168 @1)
d657e995
RB
2169/* max(a,-a) -> abs(a). */
2170(simplify
2171 (max:c @0 (negate @0))
2172 (if (TREE_CODE (type) != COMPLEX_TYPE
2173 && (! ANY_INTEGRAL_TYPE_P (type)
2174 || TYPE_OVERFLOW_UNDEFINED (type)))
2175 (abs @0)))
54f84ca9
RB
2176/* min(a,-a) -> -abs(a). */
2177(simplify
2178 (min:c @0 (negate @0))
2179 (if (TREE_CODE (type) != COMPLEX_TYPE
2180 && (! ANY_INTEGRAL_TYPE_P (type)
2181 || TYPE_OVERFLOW_UNDEFINED (type)))
2182 (negate (abs @0))))
a7f24614
RB
2183(simplify
2184 (min @0 @1)
2c2870a1
MG
2185 (switch
2186 (if (INTEGRAL_TYPE_P (type)
2187 && TYPE_MIN_VALUE (type)
2188 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2189 @1)
2190 (if (INTEGRAL_TYPE_P (type)
2191 && TYPE_MAX_VALUE (type)
2192 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2193 @0)))
a7f24614
RB
2194(simplify
2195 (max @0 @1)
2c2870a1
MG
2196 (switch
2197 (if (INTEGRAL_TYPE_P (type)
2198 && TYPE_MAX_VALUE (type)
2199 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2200 @1)
2201 (if (INTEGRAL_TYPE_P (type)
2202 && TYPE_MIN_VALUE (type)
2203 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2204 @0)))
ad6e4ba8 2205
182f37c9
N
2206/* max (a, a + CST) -> a + CST where CST is positive. */
2207/* max (a, a + CST) -> a where CST is negative. */
2208(simplify
2209 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2210 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2211 (if (tree_int_cst_sgn (@1) > 0)
2212 @2
2213 @0)))
2214
2215/* min (a, a + CST) -> a where CST is positive. */
2216/* min (a, a + CST) -> a + CST where CST is negative. */
2217(simplify
2218 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2219 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2220 (if (tree_int_cst_sgn (@1) > 0)
2221 @0
2222 @2)))
2223
ad6e4ba8
BC
2224/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
2225 and the outer convert demotes the expression back to x's type. */
2226(for minmax (min max)
2227 (simplify
2228 (convert (minmax@0 (convert @1) INTEGER_CST@2))
ebf41734
BC
2229 (if (INTEGRAL_TYPE_P (type)
2230 && types_match (@1, type) && int_fits_type_p (@2, type)
ad6e4ba8
BC
2231 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2232 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2233 (minmax @1 (convert @2)))))
2234
c6cfa2bf 2235(for minmax (FMIN_ALL FMAX_ALL)
0122e8e5
RS
2236 /* If either argument is NaN, return the other one. Avoid the
2237 transformation if we get (and honor) a signalling NaN. */
2238 (simplify
2239 (minmax:c @0 REAL_CST@1)
2240 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2241 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2242 @0)))
2243/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2244 functions to return the numeric arg if the other one is NaN.
2245 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2246 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2247 worry about it either. */
2248(if (flag_finite_math_only)
2249 (simplify
c6cfa2bf 2250 (FMIN_ALL @0 @1)
0122e8e5 2251 (min @0 @1))
4119b2eb 2252 (simplify
c6cfa2bf 2253 (FMAX_ALL @0 @1)
0122e8e5 2254 (max @0 @1)))
ce0e66ff 2255/* min (-A, -B) -> -max (A, B) */
c6cfa2bf
MM
2256(for minmax (min max FMIN_ALL FMAX_ALL)
2257 maxmin (max min FMAX_ALL FMIN_ALL)
ce0e66ff
MG
2258 (simplify
2259 (minmax (negate:s@2 @0) (negate:s@3 @1))
2260 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2261 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2262 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2263 (negate (maxmin @0 @1)))))
2264/* MIN (~X, ~Y) -> ~MAX (X, Y)
2265 MAX (~X, ~Y) -> ~MIN (X, Y) */
2266(for minmax (min max)
2267 maxmin (max min)
2268 (simplify
2269 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2270 (bit_not (maxmin @0 @1))))
a7f24614 2271
b4817bd6
MG
2272/* MIN (X, Y) == X -> X <= Y */
2273(for minmax (min min max max)
2274 cmp (eq ne eq ne )
2275 out (le gt ge lt )
2276 (simplify
2277 (cmp:c (minmax:c @0 @1) @0)
2278 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2279 (out @0 @1))))
2280/* MIN (X, 5) == 0 -> X == 0
2281 MIN (X, 5) == 7 -> false */
2282(for cmp (eq ne)
2283 (simplify
2284 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90
RS
2285 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2286 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6 2287 { constant_boolean_node (cmp == NE_EXPR, type); }
8e6cdc90
RS
2288 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2289 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6
MG
2290 (cmp @0 @2)))))
2291(for cmp (eq ne)
2292 (simplify
2293 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90
RS
2294 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2295 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6 2296 { constant_boolean_node (cmp == NE_EXPR, type); }
8e6cdc90
RS
2297 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2298 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6
MG
2299 (cmp @0 @2)))))
2300/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2301(for minmax (min min max max min min max max )
2302 cmp (lt le gt ge gt ge lt le )
2303 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2304 (simplify
2305 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2306 (comb (cmp @0 @2) (cmp @1 @2))))
2307
a7f24614
RB
2308/* Simplifications of shift and rotates. */
2309
2310(for rotate (lrotate rrotate)
2311 (simplify
2312 (rotate integer_all_onesp@0 @1)
2313 @0))
2314
2315/* Optimize -1 >> x for arithmetic right shifts. */
2316(simplify
2317 (rshift integer_all_onesp@0 @1)
2318 (if (!TYPE_UNSIGNED (type)
2319 && tree_expr_nonnegative_p (@1))
2320 @0))
2321
12085390
N
2322/* Optimize (x >> c) << c into x & (-1<<c). */
2323(simplify
2324 (lshift (rshift @0 INTEGER_CST@1) @1)
8e6cdc90 2325 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
12085390
N
2326 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
2327
2328/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2329 types. */
2330(simplify
2331 (rshift (lshift @0 INTEGER_CST@1) @1)
2332 (if (TYPE_UNSIGNED (type)
8e6cdc90 2333 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
12085390
N
2334 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
2335
a7f24614
RB
2336(for shiftrotate (lrotate rrotate lshift rshift)
2337 (simplify
2338 (shiftrotate @0 integer_zerop)
2339 (non_lvalue @0))
2340 (simplify
2341 (shiftrotate integer_zerop@0 @1)
2342 @0)
2343 /* Prefer vector1 << scalar to vector1 << vector2
2344 if vector2 is uniform. */
2345 (for vec (VECTOR_CST CONSTRUCTOR)
2346 (simplify
2347 (shiftrotate @0 vec@1)
2348 (with { tree tem = uniform_vector_p (@1); }
2349 (if (tem)
2350 (shiftrotate @0 { tem; }))))))
2351
165ba2e9
JJ
2352/* Simplify X << Y where Y's low width bits are 0 to X, as only valid
2353 Y is 0. Similarly for X >> Y. */
2354#if GIMPLE
2355(for shift (lshift rshift)
2356 (simplify
2357 (shift @0 SSA_NAME@1)
2358 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2359 (with {
2360 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2361 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2362 }
2363 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2364 @0)))))
2365#endif
2366
a7f24614
RB
2367/* Rewrite an LROTATE_EXPR by a constant into an
2368 RROTATE_EXPR by a new constant. */
2369(simplify
2370 (lrotate @0 INTEGER_CST@1)
23f27839 2371 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
a7f24614
RB
2372 build_int_cst (TREE_TYPE (@1),
2373 element_precision (type)), @1); }))
2374
14ea9f92
RB
2375/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2376(for op (lrotate rrotate rshift lshift)
2377 (simplify
2378 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2379 (with { unsigned int prec = element_precision (type); }
8e6cdc90
RS
2380 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2381 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2382 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2383 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
a1488398
RS
2384 (with { unsigned int low = (tree_to_uhwi (@1)
2385 + tree_to_uhwi (@2)); }
14ea9f92
RB
2386 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2387 being well defined. */
2388 (if (low >= prec)
2389 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
8fdc6c67 2390 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
50301115 2391 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
8fdc6c67
RB
2392 { build_zero_cst (type); }
2393 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2394 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
14ea9f92
RB
2395
2396
01ada710
MP
2397/* ((1 << A) & 1) != 0 -> A == 0
2398 ((1 << A) & 1) == 0 -> A != 0 */
2399(for cmp (ne eq)
2400 icmp (eq ne)
2401 (simplify
2402 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2403 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
cc7b5acf 2404
f2e609c3
MP
2405/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2406 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2407 if CST2 != 0. */
2408(for cmp (ne eq)
2409 (simplify
2410 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
8e6cdc90 2411 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
f2e609c3
MP
2412 (if (cand < 0
2413 || (!integer_zerop (@2)
8e6cdc90 2414 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
8fdc6c67
RB
2415 { constant_boolean_node (cmp == NE_EXPR, type); }
2416 (if (!integer_zerop (@2)
8e6cdc90 2417 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
8fdc6c67 2418 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
f2e609c3 2419
1ffbaa3f
RB
2420/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2421 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2422 if the new mask might be further optimized. */
2423(for shift (lshift rshift)
2424 (simplify
44fc0a51
RB
2425 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2426 INTEGER_CST@2)
1ffbaa3f
RB
2427 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2428 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2429 && tree_fits_uhwi_p (@1)
2430 && tree_to_uhwi (@1) > 0
2431 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2432 (with
2433 {
2434 unsigned int shiftc = tree_to_uhwi (@1);
2435 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2436 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2437 tree shift_type = TREE_TYPE (@3);
2438 unsigned int prec;
2439
2440 if (shift == LSHIFT_EXPR)
fecfbfa4 2441 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
1ffbaa3f 2442 else if (shift == RSHIFT_EXPR
2be65d9e 2443 && type_has_mode_precision_p (shift_type))
1ffbaa3f
RB
2444 {
2445 prec = TYPE_PRECISION (TREE_TYPE (@3));
2446 tree arg00 = @0;
2447 /* See if more bits can be proven as zero because of
2448 zero extension. */
2449 if (@3 != @0
2450 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2451 {
2452 tree inner_type = TREE_TYPE (@0);
2be65d9e 2453 if (type_has_mode_precision_p (inner_type)
1ffbaa3f
RB
2454 && TYPE_PRECISION (inner_type) < prec)
2455 {
2456 prec = TYPE_PRECISION (inner_type);
2457 /* See if we can shorten the right shift. */
2458 if (shiftc < prec)
2459 shift_type = inner_type;
2460 /* Otherwise X >> C1 is all zeros, so we'll optimize
2461 it into (X, 0) later on by making sure zerobits
2462 is all ones. */
2463 }
2464 }
dd4786fe 2465 zerobits = HOST_WIDE_INT_M1U;
1ffbaa3f
RB
2466 if (shiftc < prec)
2467 {
2468 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2469 zerobits <<= prec - shiftc;
2470 }
2471 /* For arithmetic shift if sign bit could be set, zerobits
2472 can contain actually sign bits, so no transformation is
2473 possible, unless MASK masks them all away. In that
2474 case the shift needs to be converted into logical shift. */
2475 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2476 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2477 {
2478 if ((mask & zerobits) == 0)
2479 shift_type = unsigned_type_for (TREE_TYPE (@3));
2480 else
2481 zerobits = 0;
2482 }
2483 }
2484 }
2485 /* ((X << 16) & 0xff00) is (X, 0). */
2486 (if ((mask & zerobits) == mask)
8fdc6c67
RB
2487 { build_int_cst (type, 0); }
2488 (with { newmask = mask | zerobits; }
2489 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2490 (with
2491 {
2492 /* Only do the transformation if NEWMASK is some integer
2493 mode's mask. */
2494 for (prec = BITS_PER_UNIT;
2495 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
fecfbfa4 2496 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
8fdc6c67
RB
2497 break;
2498 }
2499 (if (prec < HOST_BITS_PER_WIDE_INT
dd4786fe 2500 || newmask == HOST_WIDE_INT_M1U)
8fdc6c67
RB
2501 (with
2502 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2503 (if (!tree_int_cst_equal (newmaskt, @2))
2504 (if (shift_type != TREE_TYPE (@3))
2505 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2506 (bit_and @4 { newmaskt; })))))))))))))
1ffbaa3f 2507
84ff66b8
AV
2508/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2509 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
98e30e51 2510(for shift (lshift rshift)
84ff66b8
AV
2511 (for bit_op (bit_and bit_xor bit_ior)
2512 (simplify
2513 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2514 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2515 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2516 (bit_op (shift (convert @0) @1) { mask; }))))))
98e30e51 2517
ad1d92ab
MM
2518/* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2519(simplify
2520 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2521 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
ece46666
MG
2522 && (element_precision (TREE_TYPE (@0))
2523 <= element_precision (TREE_TYPE (@1))
2524 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
ad1d92ab
MM
2525 (with
2526 { tree shift_type = TREE_TYPE (@0); }
2527 (convert (rshift (convert:shift_type @1) @2)))))
2528
2529/* ~(~X >>r Y) -> X >>r Y
2530 ~(~X <<r Y) -> X <<r Y */
2531(for rotate (lrotate rrotate)
2532 (simplify
2533 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
ece46666
MG
2534 (if ((element_precision (TREE_TYPE (@0))
2535 <= element_precision (TREE_TYPE (@1))
2536 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2537 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2538 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
ad1d92ab
MM
2539 (with
2540 { tree rotate_type = TREE_TYPE (@0); }
2541 (convert (rotate (convert:rotate_type @1) @2))))))
98e30e51 2542
d4573ffe
RB
2543/* Simplifications of conversions. */
2544
2545/* Basic strip-useless-type-conversions / strip_nops. */
f3582e54 2546(for cvt (convert view_convert float fix_trunc)
d4573ffe
RB
2547 (simplify
2548 (cvt @0)
2549 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2550 || (GENERIC && type == TREE_TYPE (@0)))
2551 @0)))
2552
2553/* Contract view-conversions. */
2554(simplify
2555 (view_convert (view_convert @0))
2556 (view_convert @0))
2557
2558/* For integral conversions with the same precision or pointer
2559 conversions use a NOP_EXPR instead. */
2560(simplify
2561 (view_convert @0)
2562 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2563 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2564 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2565 (convert @0)))
2566
bce8ef71
MG
2567/* Strip inner integral conversions that do not change precision or size, or
2568 zero-extend while keeping the same size (for bool-to-char). */
d4573ffe
RB
2569(simplify
2570 (view_convert (convert@0 @1))
2571 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2572 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
bce8ef71
MG
2573 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2574 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2575 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2576 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
d4573ffe
RB
2577 (view_convert @1)))
2578
2579/* Re-association barriers around constants and other re-association
2580 barriers can be removed. */
2581(simplify
2582 (paren CONSTANT_CLASS_P@0)
2583 @0)
2584(simplify
2585 (paren (paren@1 @0))
2586 @1)
1e51d0a2
RB
2587
2588/* Handle cases of two conversions in a row. */
2589(for ocvt (convert float fix_trunc)
2590 (for icvt (convert float)
2591 (simplify
2592 (ocvt (icvt@1 @0))
2593 (with
2594 {
2595 tree inside_type = TREE_TYPE (@0);
2596 tree inter_type = TREE_TYPE (@1);
2597 int inside_int = INTEGRAL_TYPE_P (inside_type);
2598 int inside_ptr = POINTER_TYPE_P (inside_type);
2599 int inside_float = FLOAT_TYPE_P (inside_type);
09240451 2600 int inside_vec = VECTOR_TYPE_P (inside_type);
1e51d0a2
RB
2601 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2602 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2603 int inter_int = INTEGRAL_TYPE_P (inter_type);
2604 int inter_ptr = POINTER_TYPE_P (inter_type);
2605 int inter_float = FLOAT_TYPE_P (inter_type);
09240451 2606 int inter_vec = VECTOR_TYPE_P (inter_type);
1e51d0a2
RB
2607 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2608 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2609 int final_int = INTEGRAL_TYPE_P (type);
2610 int final_ptr = POINTER_TYPE_P (type);
2611 int final_float = FLOAT_TYPE_P (type);
09240451 2612 int final_vec = VECTOR_TYPE_P (type);
1e51d0a2
RB
2613 unsigned int final_prec = TYPE_PRECISION (type);
2614 int final_unsignedp = TYPE_UNSIGNED (type);
2615 }
64d3a1f0
RB
2616 (switch
2617 /* In addition to the cases of two conversions in a row
2618 handled below, if we are converting something to its own
2619 type via an object of identical or wider precision, neither
2620 conversion is needed. */
2621 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2622 || (GENERIC
2623 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2624 && (((inter_int || inter_ptr) && final_int)
2625 || (inter_float && final_float))
2626 && inter_prec >= final_prec)
2627 (ocvt @0))
2628
2629 /* Likewise, if the intermediate and initial types are either both
2630 float or both integer, we don't need the middle conversion if the
2631 former is wider than the latter and doesn't change the signedness
2632 (for integers). Avoid this if the final type is a pointer since
36088299 2633 then we sometimes need the middle conversion. */
64d3a1f0
RB
2634 (if (((inter_int && inside_int) || (inter_float && inside_float))
2635 && (final_int || final_float)
2636 && inter_prec >= inside_prec
36088299 2637 && (inter_float || inter_unsignedp == inside_unsignedp))
64d3a1f0
RB
2638 (ocvt @0))
2639
2640 /* If we have a sign-extension of a zero-extended value, we can
2641 replace that by a single zero-extension. Likewise if the
2642 final conversion does not change precision we can drop the
2643 intermediate conversion. */
2644 (if (inside_int && inter_int && final_int
2645 && ((inside_prec < inter_prec && inter_prec < final_prec
2646 && inside_unsignedp && !inter_unsignedp)
2647 || final_prec == inter_prec))
2648 (ocvt @0))
2649
2650 /* Two conversions in a row are not needed unless:
1e51d0a2
RB
2651 - some conversion is floating-point (overstrict for now), or
2652 - some conversion is a vector (overstrict for now), or
2653 - the intermediate type is narrower than both initial and
2654 final, or
2655 - the intermediate type and innermost type differ in signedness,
2656 and the outermost type is wider than the intermediate, or
2657 - the initial type is a pointer type and the precisions of the
2658 intermediate and final types differ, or
2659 - the final type is a pointer type and the precisions of the
2660 initial and intermediate types differ. */
64d3a1f0
RB
2661 (if (! inside_float && ! inter_float && ! final_float
2662 && ! inside_vec && ! inter_vec && ! final_vec
2663 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2664 && ! (inside_int && inter_int
2665 && inter_unsignedp != inside_unsignedp
2666 && inter_prec < final_prec)
2667 && ((inter_unsignedp && inter_prec > inside_prec)
2668 == (final_unsignedp && final_prec > inter_prec))
2669 && ! (inside_ptr && inter_prec != final_prec)
36088299 2670 && ! (final_ptr && inside_prec != inter_prec))
64d3a1f0
RB
2671 (ocvt @0))
2672
2673 /* A truncation to an unsigned type (a zero-extension) should be
2674 canonicalized as bitwise and of a mask. */
1d510e04
JJ
2675 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2676 && final_int && inter_int && inside_int
64d3a1f0
RB
2677 && final_prec == inside_prec
2678 && final_prec > inter_prec
2679 && inter_unsignedp)
2680 (convert (bit_and @0 { wide_int_to_tree
2681 (inside_type,
2682 wi::mask (inter_prec, false,
2683 TYPE_PRECISION (inside_type))); })))
2684
2685 /* If we are converting an integer to a floating-point that can
2686 represent it exactly and back to an integer, we can skip the
2687 floating-point conversion. */
2688 (if (GIMPLE /* PR66211 */
2689 && inside_int && inter_float && final_int &&
2690 (unsigned) significand_size (TYPE_MODE (inter_type))
2691 >= inside_prec - !inside_unsignedp)
2692 (convert @0)))))))
ea2042ba
RB
2693
2694/* If we have a narrowing conversion to an integral type that is fed by a
2695 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2696 masks off bits outside the final type (and nothing else). */
2697(simplify
2698 (convert (bit_and @0 INTEGER_CST@1))
2699 (if (INTEGRAL_TYPE_P (type)
2700 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2701 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2702 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2703 TYPE_PRECISION (type)), 0))
2704 (convert @0)))
a25454ea
RB
2705
2706
2707/* (X /[ex] A) * A -> X. */
2708(simplify
2eef1fc1
RB
2709 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2710 (convert @0))
eaeba53a 2711
0036218b
MG
2712/* ((X /[ex] A) +- B) * A --> X +- A * B. */
2713(for op (plus minus)
2714 (simplify
2715 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
2716 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
2717 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
2718 (with
2719 {
2720 wi::overflow_type overflow;
2721 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
2722 TYPE_SIGN (type), &overflow);
2723 }
2724 (if (types_match (type, TREE_TYPE (@2))
2725 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
2726 (op @0 { wide_int_to_tree (type, mul); })
2727 (with { tree utype = unsigned_type_for (type); }
2728 (convert (op (convert:utype @0)
2729 (mult (convert:utype @1) (convert:utype @2))))))))))
2730
a7f24614
RB
2731/* Canonicalization of binary operations. */
2732
2733/* Convert X + -C into X - C. */
2734(simplify
2735 (plus @0 REAL_CST@1)
2736 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
23f27839 2737 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
a7f24614
RB
2738 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2739 (minus @0 { tem; })))))
2740
6b6aa8d3 2741/* Convert x+x into x*2. */
a7f24614
RB
2742(simplify
2743 (plus @0 @0)
2744 (if (SCALAR_FLOAT_TYPE_P (type))
6b6aa8d3
MG
2745 (mult @0 { build_real (type, dconst2); })
2746 (if (INTEGRAL_TYPE_P (type))
2747 (mult @0 { build_int_cst (type, 2); }))))
a7f24614 2748
406520e2 2749/* 0 - X -> -X. */
a7f24614
RB
2750(simplify
2751 (minus integer_zerop @1)
2752 (negate @1))
406520e2
MG
2753(simplify
2754 (pointer_diff integer_zerop @1)
2755 (negate (convert @1)))
a7f24614
RB
2756
2757/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2758 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2759 (-ARG1 + ARG0) reduces to -ARG1. */
2760(simplify
2761 (minus real_zerop@0 @1)
2762 (if (fold_real_zero_addition_p (type, @0, 0))
2763 (negate @1)))
2764
2765/* Transform x * -1 into -x. */
2766(simplify
2767 (mult @0 integer_minus_onep)
2768 (negate @0))
eaeba53a 2769
b771c609
AM
2770/* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2771 signed overflow for CST != 0 && CST != -1. */
2772(simplify
b46ebc6c 2773 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
b771c609 2774 (if (TREE_CODE (@2) != INTEGER_CST
b46ebc6c 2775 && single_use (@3)
b771c609
AM
2776 && !integer_zerop (@1) && !integer_minus_onep (@1))
2777 (mult (mult @0 @2) @1)))
2778
96285749
RS
2779/* True if we can easily extract the real and imaginary parts of a complex
2780 number. */
2781(match compositional_complex
2782 (convert? (complex @0 @1)))
2783
eaeba53a
RB
2784/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2785(simplify
2786 (complex (realpart @0) (imagpart @0))
2787 @0)
2788(simplify
2789 (realpart (complex @0 @1))
2790 @0)
2791(simplify
2792 (imagpart (complex @0 @1))
2793 @1)
83633539 2794
77c028c5
MG
2795/* Sometimes we only care about half of a complex expression. */
2796(simplify
2797 (realpart (convert?:s (conj:s @0)))
2798 (convert (realpart @0)))
2799(simplify
2800 (imagpart (convert?:s (conj:s @0)))
2801 (convert (negate (imagpart @0))))
2802(for part (realpart imagpart)
2803 (for op (plus minus)
2804 (simplify
2805 (part (convert?:s@2 (op:s @0 @1)))
2806 (convert (op (part @0) (part @1))))))
2807(simplify
2808 (realpart (convert?:s (CEXPI:s @0)))
2809 (convert (COS @0)))
2810(simplify
2811 (imagpart (convert?:s (CEXPI:s @0)))
2812 (convert (SIN @0)))
2813
2814/* conj(conj(x)) -> x */
2815(simplify
2816 (conj (convert? (conj @0)))
2817 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2818 (convert @0)))
2819
2820/* conj({x,y}) -> {x,-y} */
2821(simplify
2822 (conj (convert?:s (complex:s @0 @1)))
2823 (with { tree itype = TREE_TYPE (type); }
2824 (complex (convert:itype @0) (negate (convert:itype @1)))))
83633539
RB
2825
2826/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2827(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2828 (simplify
2829 (bswap (bswap @0))
2830 @0)
2831 (simplify
2832 (bswap (bit_not (bswap @0)))
2833 (bit_not @0))
2834 (for bitop (bit_xor bit_ior bit_and)
2835 (simplify
2836 (bswap (bitop:c (bswap @0) @1))
2837 (bitop @0 (bswap @1)))))
96994de0
RB
2838
2839
2840/* Combine COND_EXPRs and VEC_COND_EXPRs. */
2841
2842/* Simplify constant conditions.
2843 Only optimize constant conditions when the selected branch
2844 has the same type as the COND_EXPR. This avoids optimizing
2845 away "c ? x : throw", where the throw has a void type.
2846 Note that we cannot throw away the fold-const.c variant nor
2847 this one as we depend on doing this transform before possibly
2848 A ? B : B -> B triggers and the fold-const.c one can optimize
2849 0 ? A : B to B even if A has side-effects. Something
2850 genmatch cannot handle. */
2851(simplify
2852 (cond INTEGER_CST@0 @1 @2)
8fdc6c67
RB
2853 (if (integer_zerop (@0))
2854 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2855 @2)
2856 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2857 @1)))
96994de0
RB
2858(simplify
2859 (vec_cond VECTOR_CST@0 @1 @2)
2860 (if (integer_all_onesp (@0))
8fdc6c67
RB
2861 @1
2862 (if (integer_zerop (@0))
2863 @2)))
96994de0 2864
b5481987
BC
2865/* Simplification moved from fold_cond_expr_with_comparison. It may also
2866 be extended. */
e2535011
BC
2867/* This pattern implements two kinds simplification:
2868
2869 Case 1)
2870 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
b5481987
BC
2871 1) Conversions are type widening from smaller type.
2872 2) Const c1 equals to c2 after canonicalizing comparison.
2873 3) Comparison has tree code LT, LE, GT or GE.
2874 This specific pattern is needed when (cmp (convert x) c) may not
2875 be simplified by comparison patterns because of multiple uses of
2876 x. It also makes sense here because simplifying across multiple
e2535011
BC
2877 referred var is always benefitial for complicated cases.
2878
2879 Case 2)
2880 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2881(for cmp (lt le gt ge eq)
b5481987 2882 (simplify
ae22bc5d 2883 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
b5481987
BC
2884 (with
2885 {
2886 tree from_type = TREE_TYPE (@1);
2887 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
ae22bc5d 2888 enum tree_code code = ERROR_MARK;
b5481987 2889
ae22bc5d
BC
2890 if (INTEGRAL_TYPE_P (from_type)
2891 && int_fits_type_p (@2, from_type)
b5481987
BC
2892 && (types_match (c1_type, from_type)
2893 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2894 && (TYPE_UNSIGNED (from_type)
2895 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2896 && (types_match (c2_type, from_type)
2897 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2898 && (TYPE_UNSIGNED (from_type)
2899 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2900 {
ae22bc5d 2901 if (cmp != EQ_EXPR)
b5481987 2902 {
e2535011
BC
2903 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2904 {
2905 /* X <= Y - 1 equals to X < Y. */
ae22bc5d 2906 if (cmp == LE_EXPR)
e2535011
BC
2907 code = LT_EXPR;
2908 /* X > Y - 1 equals to X >= Y. */
ae22bc5d 2909 if (cmp == GT_EXPR)
e2535011
BC
2910 code = GE_EXPR;
2911 }
2912 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2913 {
2914 /* X < Y + 1 equals to X <= Y. */
ae22bc5d 2915 if (cmp == LT_EXPR)
e2535011
BC
2916 code = LE_EXPR;
2917 /* X >= Y + 1 equals to X > Y. */
ae22bc5d 2918 if (cmp == GE_EXPR)
e2535011
BC
2919 code = GT_EXPR;
2920 }
ae22bc5d
BC
2921 if (code != ERROR_MARK
2922 || wi::to_widest (@2) == wi::to_widest (@3))
e2535011 2923 {
ae22bc5d 2924 if (cmp == LT_EXPR || cmp == LE_EXPR)
e2535011 2925 code = MIN_EXPR;
ae22bc5d 2926 if (cmp == GT_EXPR || cmp == GE_EXPR)
e2535011
BC
2927 code = MAX_EXPR;
2928 }
b5481987 2929 }
e2535011 2930 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
ae22bc5d
BC
2931 else if (int_fits_type_p (@3, from_type))
2932 code = EQ_EXPR;
b5481987
BC
2933 }
2934 }
2935 (if (code == MAX_EXPR)
21aaaf1e 2936 (convert (max @1 (convert @2)))
b5481987 2937 (if (code == MIN_EXPR)
21aaaf1e 2938 (convert (min @1 (convert @2)))
e2535011 2939 (if (code == EQ_EXPR)
ae22bc5d 2940 (convert (cond (eq @1 (convert @3))
21aaaf1e 2941 (convert:from_type @3) (convert:from_type @2)))))))))
b5481987 2942
714445ae
BC
2943/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2944
2945 1) OP is PLUS or MINUS.
2946 2) CMP is LT, LE, GT or GE.
2947 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2948
2949 This pattern also handles special cases like:
2950
2951 A) Operand x is a unsigned to signed type conversion and c1 is
2952 integer zero. In this case,
2953 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2954 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2955 B) Const c1 may not equal to (C3 op' C2). In this case we also
2956 check equality for (c1+1) and (c1-1) by adjusting comparison
2957 code.
2958
2959 TODO: Though signed type is handled by this pattern, it cannot be
2960 simplified at the moment because C standard requires additional
2961 type promotion. In order to match&simplify it here, the IR needs
2962 to be cleaned up by other optimizers, i.e, VRP. */
2963(for op (plus minus)
2964 (for cmp (lt le gt ge)
2965 (simplify
2966 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2967 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2968 (if (types_match (from_type, to_type)
2969 /* Check if it is special case A). */
2970 || (TYPE_UNSIGNED (from_type)
2971 && !TYPE_UNSIGNED (to_type)
2972 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2973 && integer_zerop (@1)
2974 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2975 (with
2976 {
4a669ac3 2977 wi::overflow_type overflow = wi::OVF_NONE;
714445ae 2978 enum tree_code code, cmp_code = cmp;
8e6cdc90
RS
2979 wide_int real_c1;
2980 wide_int c1 = wi::to_wide (@1);
2981 wide_int c2 = wi::to_wide (@2);
2982 wide_int c3 = wi::to_wide (@3);
714445ae
BC
2983 signop sgn = TYPE_SIGN (from_type);
2984
2985 /* Handle special case A), given x of unsigned type:
2986 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2987 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2988 if (!types_match (from_type, to_type))
2989 {
2990 if (cmp_code == LT_EXPR)
2991 cmp_code = GT_EXPR;
2992 if (cmp_code == GE_EXPR)
2993 cmp_code = LE_EXPR;
2994 c1 = wi::max_value (to_type);
2995 }
2996 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2997 compute (c3 op' c2) and check if it equals to c1 with op' being
2998 the inverted operator of op. Make sure overflow doesn't happen
2999 if it is undefined. */
3000 if (op == PLUS_EXPR)
3001 real_c1 = wi::sub (c3, c2, sgn, &overflow);
3002 else
3003 real_c1 = wi::add (c3, c2, sgn, &overflow);
3004
3005 code = cmp_code;
3006 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3007 {
3008 /* Check if c1 equals to real_c1. Boundary condition is handled
3009 by adjusting comparison operation if necessary. */
3010 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3011 && !overflow)
3012 {
3013 /* X <= Y - 1 equals to X < Y. */
3014 if (cmp_code == LE_EXPR)
3015 code = LT_EXPR;
3016 /* X > Y - 1 equals to X >= Y. */
3017 if (cmp_code == GT_EXPR)
3018 code = GE_EXPR;
3019 }
3020 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3021 && !overflow)
3022 {
3023 /* X < Y + 1 equals to X <= Y. */
3024 if (cmp_code == LT_EXPR)
3025 code = LE_EXPR;
3026 /* X >= Y + 1 equals to X > Y. */
3027 if (cmp_code == GE_EXPR)
3028 code = GT_EXPR;
3029 }
3030 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3031 {
3032 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3033 code = MIN_EXPR;
3034 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3035 code = MAX_EXPR;
3036 }
3037 }
3038 }
3039 (if (code == MAX_EXPR)
3040 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3041 { wide_int_to_tree (from_type, c2); })
3042 (if (code == MIN_EXPR)
3043 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3044 { wide_int_to_tree (from_type, c2); })))))))))
3045
96994de0
RB
3046(for cnd (cond vec_cond)
3047 /* A ? B : (A ? X : C) -> A ? B : C. */
3048 (simplify
3049 (cnd @0 (cnd @0 @1 @2) @3)
3050 (cnd @0 @1 @3))
3051 (simplify
3052 (cnd @0 @1 (cnd @0 @2 @3))
3053 (cnd @0 @1 @3))
24a179f8
RB
3054 /* A ? B : (!A ? C : X) -> A ? B : C. */
3055 /* ??? This matches embedded conditions open-coded because genmatch
3056 would generate matching code for conditions in separate stmts only.
3057 The following is still important to merge then and else arm cases
3058 from if-conversion. */
3059 (simplify
3060 (cnd @0 @1 (cnd @2 @3 @4))
2c58d42c 3061 (if (inverse_conditions_p (@0, @2))
24a179f8
RB
3062 (cnd @0 @1 @3)))
3063 (simplify
3064 (cnd @0 (cnd @1 @2 @3) @4)
2c58d42c 3065 (if (inverse_conditions_p (@0, @1))
24a179f8 3066 (cnd @0 @3 @4)))
96994de0
RB
3067
3068 /* A ? B : B -> B. */
3069 (simplify
3070 (cnd @0 @1 @1)
09240451 3071 @1)
96994de0 3072
09240451
MG
3073 /* !A ? B : C -> A ? C : B. */
3074 (simplify
3075 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3076 (cnd @0 @2 @1)))
f84e7fd6 3077
a3ca1bc5
RB
3078/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3079 return all -1 or all 0 results. */
f43d102e
RS
3080/* ??? We could instead convert all instances of the vec_cond to negate,
3081 but that isn't necessarily a win on its own. */
3082(simplify
a3ca1bc5 3083 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 3084 (if (VECTOR_TYPE_P (type)
928686b1
RS
3085 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3086 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
f43d102e 3087 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 3088 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 3089 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f43d102e 3090
a3ca1bc5 3091/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
f43d102e 3092(simplify
a3ca1bc5 3093 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 3094 (if (VECTOR_TYPE_P (type)
928686b1
RS
3095 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3096 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
f43d102e 3097 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 3098 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 3099 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f84e7fd6 3100
2ee05f1e 3101
f84e7fd6
RB
3102/* Simplifications of comparisons. */
3103
24f1db9c
RB
3104/* See if we can reduce the magnitude of a constant involved in a
3105 comparison by changing the comparison code. This is a canonicalization
3106 formerly done by maybe_canonicalize_comparison_1. */
3107(for cmp (le gt)
3108 acmp (lt ge)
3109 (simplify
3110 (cmp @0 INTEGER_CST@1)
3111 (if (tree_int_cst_sgn (@1) == -1)
8e6cdc90 3112 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
24f1db9c
RB
3113(for cmp (ge lt)
3114 acmp (gt le)
3115 (simplify
3116 (cmp @0 INTEGER_CST@1)
3117 (if (tree_int_cst_sgn (@1) == 1)
8e6cdc90 3118 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
24f1db9c
RB
3119
3120
f84e7fd6
RB
3121/* We can simplify a logical negation of a comparison to the
3122 inverted comparison. As we cannot compute an expression
3123 operator using invert_tree_comparison we have to simulate
3124 that with expression code iteration. */
3125(for cmp (tcc_comparison)
3126 icmp (inverted_tcc_comparison)
3127 ncmp (inverted_tcc_comparison_with_nans)
3128 /* Ideally we'd like to combine the following two patterns
3129 and handle some more cases by using
3130 (logical_inverted_value (cmp @0 @1))
3131 here but for that genmatch would need to "inline" that.
3132 For now implement what forward_propagate_comparison did. */
3133 (simplify
3134 (bit_not (cmp @0 @1))
3135 (if (VECTOR_TYPE_P (type)
3136 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3137 /* Comparison inversion may be impossible for trapping math,
3138 invert_tree_comparison will tell us. But we can't use
3139 a computed operator in the replacement tree thus we have
3140 to play the trick below. */
3141 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 3142 (cmp, HONOR_NANS (@0)); }
f84e7fd6 3143 (if (ic == icmp)
8fdc6c67
RB
3144 (icmp @0 @1)
3145 (if (ic == ncmp)
3146 (ncmp @0 @1))))))
f84e7fd6 3147 (simplify
09240451
MG
3148 (bit_xor (cmp @0 @1) integer_truep)
3149 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 3150 (cmp, HONOR_NANS (@0)); }
09240451 3151 (if (ic == icmp)
8fdc6c67
RB
3152 (icmp @0 @1)
3153 (if (ic == ncmp)
3154 (ncmp @0 @1))))))
e18c1d66 3155
2ee05f1e
RB
3156/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3157 ??? The transformation is valid for the other operators if overflow
3158 is undefined for the type, but performing it here badly interacts
3159 with the transformation in fold_cond_expr_with_comparison which
3160 attempts to synthetize ABS_EXPR. */
3161(for cmp (eq ne)
1af4ebf5
MG
3162 (for sub (minus pointer_diff)
3163 (simplify
3164 (cmp (sub@2 @0 @1) integer_zerop)
3165 (if (single_use (@2))
3166 (cmp @0 @1)))))
2ee05f1e
RB
3167
3168/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3169 signed arithmetic case. That form is created by the compiler
3170 often enough for folding it to be of value. One example is in
3171 computing loop trip counts after Operator Strength Reduction. */
07cdc2b8
RB
3172(for cmp (simple_comparison)
3173 scmp (swapped_simple_comparison)
2ee05f1e 3174 (simplify
bc6e9db4 3175 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2ee05f1e
RB
3176 /* Handle unfolded multiplication by zero. */
3177 (if (integer_zerop (@1))
8fdc6c67
RB
3178 (cmp @1 @2)
3179 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
bc6e9db4
RB
3180 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3181 && single_use (@3))
8fdc6c67
RB
3182 /* If @1 is negative we swap the sense of the comparison. */
3183 (if (tree_int_cst_sgn (@1) < 0)
3184 (scmp @0 @2)
3185 (cmp @0 @2))))))
03cc70b5 3186
2ee05f1e
RB
3187/* Simplify comparison of something with itself. For IEEE
3188 floating-point, we can only do some of these simplifications. */
287f8f17 3189(for cmp (eq ge le)
2ee05f1e
RB
3190 (simplify
3191 (cmp @0 @0)
287f8f17 3192 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 3193 || ! HONOR_NANS (@0))
287f8f17
RB
3194 { constant_boolean_node (true, type); }
3195 (if (cmp != EQ_EXPR)
3196 (eq @0 @0)))))
2ee05f1e
RB
3197(for cmp (ne gt lt)
3198 (simplify
3199 (cmp @0 @0)
3200 (if (cmp != NE_EXPR
3201 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 3202 || ! HONOR_NANS (@0))
2ee05f1e 3203 { constant_boolean_node (false, type); })))
b5d3d787
RB
3204(for cmp (unle unge uneq)
3205 (simplify
3206 (cmp @0 @0)
3207 { constant_boolean_node (true, type); }))
dd53d197
MG
3208(for cmp (unlt ungt)
3209 (simplify
3210 (cmp @0 @0)
3211 (unordered @0 @0)))
b5d3d787
RB
3212(simplify
3213 (ltgt @0 @0)
3214 (if (!flag_trapping_math)
3215 { constant_boolean_node (false, type); }))
2ee05f1e
RB
3216
3217/* Fold ~X op ~Y as Y op X. */
07cdc2b8 3218(for cmp (simple_comparison)
2ee05f1e 3219 (simplify
7fe996ba
RB
3220 (cmp (bit_not@2 @0) (bit_not@3 @1))
3221 (if (single_use (@2) && single_use (@3))
3222 (cmp @1 @0))))
2ee05f1e
RB
3223
3224/* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
07cdc2b8
RB
3225(for cmp (simple_comparison)
3226 scmp (swapped_simple_comparison)
2ee05f1e 3227 (simplify
7fe996ba
RB
3228 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3229 (if (single_use (@2)
3230 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2ee05f1e
RB
3231 (scmp @0 (bit_not @1)))))
3232
07cdc2b8
RB
3233(for cmp (simple_comparison)
3234 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3235 (simplify
3236 (cmp (convert@2 @0) (convert? @1))
3237 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3238 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3239 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3240 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3241 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3242 (with
3243 {
3244 tree type1 = TREE_TYPE (@1);
3245 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3246 {
3247 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3248 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3249 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3250 type1 = float_type_node;
3251 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3252 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3253 type1 = double_type_node;
3254 }
3255 tree newtype
3256 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
03cc70b5 3257 ? TREE_TYPE (@0) : type1);
07cdc2b8
RB
3258 }
3259 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3260 (cmp (convert:newtype @0) (convert:newtype @1))))))
03cc70b5 3261
07cdc2b8
RB
3262 (simplify
3263 (cmp @0 REAL_CST@1)
3264 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
64d3a1f0
RB
3265 (switch
3266 /* a CMP (-0) -> a CMP 0 */
3267 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3268 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3269 /* x != NaN is always true, other ops are always false. */
3270 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3271 && ! HONOR_SNANS (@1))
3272 { constant_boolean_node (cmp == NE_EXPR, type); })
3273 /* Fold comparisons against infinity. */
3274 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3275 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3276 (with
3277 {
3278 REAL_VALUE_TYPE max;
3279 enum tree_code code = cmp;
3280 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3281 if (neg)
3282 code = swap_tree_comparison (code);
3283 }
3284 (switch
e96a5786 3285 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
64d3a1f0 3286 (if (code == GT_EXPR
e96a5786 3287 && !(HONOR_NANS (@0) && flag_trapping_math))
64d3a1f0
RB
3288 { constant_boolean_node (false, type); })
3289 (if (code == LE_EXPR)
e96a5786 3290 /* x <= +Inf is always true, if we don't care about NaNs. */
64d3a1f0
RB
3291 (if (! HONOR_NANS (@0))
3292 { constant_boolean_node (true, type); }
e96a5786
JM
3293 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3294 an "invalid" exception. */
3295 (if (!flag_trapping_math)
3296 (eq @0 @0))))
3297 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3298 for == this introduces an exception for x a NaN. */
3299 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3300 || code == GE_EXPR)
64d3a1f0
RB
3301 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3302 (if (neg)
3303 (lt @0 { build_real (TREE_TYPE (@0), max); })
3304 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3305 /* x < +Inf is always equal to x <= DBL_MAX. */
3306 (if (code == LT_EXPR)
3307 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3308 (if (neg)
3309 (ge @0 { build_real (TREE_TYPE (@0), max); })
3310 (le @0 { build_real (TREE_TYPE (@0), max); }))))
e96a5786
JM
3311 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3312 an exception for x a NaN so use an unordered comparison. */
64d3a1f0
RB
3313 (if (code == NE_EXPR)
3314 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3315 (if (! HONOR_NANS (@0))
3316 (if (neg)
3317 (ge @0 { build_real (TREE_TYPE (@0), max); })
3318 (le @0 { build_real (TREE_TYPE (@0), max); }))
3319 (if (neg)
e96a5786
JM
3320 (unge @0 { build_real (TREE_TYPE (@0), max); })
3321 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
07cdc2b8
RB
3322
3323 /* If this is a comparison of a real constant with a PLUS_EXPR
3324 or a MINUS_EXPR of a real constant, we can convert it into a
3325 comparison with a revised real constant as long as no overflow
3326 occurs when unsafe_math_optimizations are enabled. */
3327 (if (flag_unsafe_math_optimizations)
3328 (for op (plus minus)
3329 (simplify
3330 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3331 (with
3332 {
3333 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3334 TREE_TYPE (@1), @2, @1);
3335 }
f980c9a2 3336 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
3337 (cmp @0 { tem; }))))))
3338
3339 /* Likewise, we can simplify a comparison of a real constant with
3340 a MINUS_EXPR whose first operand is also a real constant, i.e.
3341 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3342 floating-point types only if -fassociative-math is set. */
3343 (if (flag_associative_math)
3344 (simplify
0409237b 3345 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
07cdc2b8 3346 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
f980c9a2 3347 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
3348 (cmp { tem; } @1)))))
3349
3350 /* Fold comparisons against built-in math functions. */
3351 (if (flag_unsafe_math_optimizations
3352 && ! flag_errno_math)
3353 (for sq (SQRT)
3354 (simplify
3355 (cmp (sq @0) REAL_CST@1)
64d3a1f0
RB
3356 (switch
3357 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3358 (switch
3359 /* sqrt(x) < y is always false, if y is negative. */
3360 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
8fdc6c67 3361 { constant_boolean_node (false, type); })
64d3a1f0
RB
3362 /* sqrt(x) > y is always true, if y is negative and we
3363 don't care about NaNs, i.e. negative values of x. */
3364 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3365 { constant_boolean_node (true, type); })
3366 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3367 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
c53233c6
RS
3368 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3369 (switch
3370 /* sqrt(x) < 0 is always false. */
3371 (if (cmp == LT_EXPR)
3372 { constant_boolean_node (false, type); })
3373 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3374 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3375 { constant_boolean_node (true, type); })
3376 /* sqrt(x) <= 0 -> x == 0. */
3377 (if (cmp == LE_EXPR)
3378 (eq @0 @1))
3379 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3380 == or !=. In the last case:
3381
3382 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3383
3384 if x is negative or NaN. Due to -funsafe-math-optimizations,
3385 the results for other x follow from natural arithmetic. */
3386 (cmp @0 @1)))
64d3a1f0
RB
3387 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3388 (with
3389 {
3390 REAL_VALUE_TYPE c2;
5c88ea94
RS
3391 real_arithmetic (&c2, MULT_EXPR,
3392 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
3393 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3394 }
3395 (if (REAL_VALUE_ISINF (c2))
3396 /* sqrt(x) > y is x == +Inf, when y is very large. */
3397 (if (HONOR_INFINITIES (@0))
3398 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3399 { constant_boolean_node (false, type); })
3400 /* sqrt(x) > c is the same as x > c*c. */
3401 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3402 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3403 (with
3404 {
3405 REAL_VALUE_TYPE c2;
5c88ea94
RS
3406 real_arithmetic (&c2, MULT_EXPR,
3407 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
3408 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3409 }
3410 (if (REAL_VALUE_ISINF (c2))
3411 (switch
3412 /* sqrt(x) < y is always true, when y is a very large
3413 value and we don't care about NaNs or Infinities. */
3414 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3415 { constant_boolean_node (true, type); })
3416 /* sqrt(x) < y is x != +Inf when y is very large and we
3417 don't care about NaNs. */
3418 (if (! HONOR_NANS (@0))
3419 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3420 /* sqrt(x) < y is x >= 0 when y is very large and we
3421 don't care about Infinities. */
3422 (if (! HONOR_INFINITIES (@0))
3423 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3424 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3425 (if (GENERIC)
3426 (truth_andif
3427 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3428 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3429 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3430 (if (! HONOR_NANS (@0))
3431 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3432 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3433 (if (GENERIC)
3434 (truth_andif
3435 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
0ca2e7f7
PK
3436 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3437 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3438 (simplify
3439 (cmp (sq @0) (sq @1))
3440 (if (! HONOR_NANS (@0))
3441 (cmp @0 @1))))))
2ee05f1e 3442
e41ec71b 3443/* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
f3842847
YG
3444(for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
3445 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
e41ec71b
YG
3446 (simplify
3447 (cmp (float@0 @1) (float @2))
3448 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
3449 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3450 (with
3451 {
3452 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
3453 tree type1 = TREE_TYPE (@1);
3454 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
3455 tree type2 = TREE_TYPE (@2);
3456 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
3457 }
3458 (if (fmt.can_represent_integral_type_p (type1)
3459 && fmt.can_represent_integral_type_p (type2))
f3842847
YG
3460 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
3461 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
3462 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
3463 && type1_signed_p >= type2_signed_p)
3464 (icmp @1 (convert @2))
3465 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
3466 && type1_signed_p <= type2_signed_p)
3467 (icmp (convert:type2 @1) @2)
3468 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
3469 && type1_signed_p == type2_signed_p)
3470 (icmp @1 @2))))))))))
e41ec71b 3471
c779bea5
YG
3472/* Optimize various special cases of (FTYPE) N CMP CST. */
3473(for cmp (lt le eq ne ge gt)
3474 icmp (le le eq ne ge ge)
3475 (simplify
3476 (cmp (float @0) REAL_CST@1)
3477 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3478 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3479 (with
3480 {
3481 tree itype = TREE_TYPE (@0);
c779bea5
YG
3482 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3483 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3484 /* Be careful to preserve any potential exceptions due to
3485 NaNs. qNaNs are ok in == or != context.
3486 TODO: relax under -fno-trapping-math or
3487 -fno-signaling-nans. */
3488 bool exception_p
3489 = real_isnan (cst) && (cst->signalling
c651dca2 3490 || (cmp != EQ_EXPR && cmp != NE_EXPR));
c779bea5
YG
3491 }
3492 /* TODO: allow non-fitting itype and SNaNs when
3493 -fno-trapping-math. */
e41ec71b 3494 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
c779bea5
YG
3495 (with
3496 {
e41ec71b 3497 signop isign = TYPE_SIGN (itype);
c779bea5
YG
3498 REAL_VALUE_TYPE imin, imax;
3499 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3500 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3501
3502 REAL_VALUE_TYPE icst;
3503 if (cmp == GT_EXPR || cmp == GE_EXPR)
3504 real_ceil (&icst, fmt, cst);
3505 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3506 real_floor (&icst, fmt, cst);
3507 else
3508 real_trunc (&icst, fmt, cst);
3509
b09bf97b 3510 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
c779bea5
YG
3511
3512 bool overflow_p = false;
3513 wide_int icst_val
3514 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3515 }
3516 (switch
3517 /* Optimize cases when CST is outside of ITYPE's range. */
3518 (if (real_compare (LT_EXPR, cst, &imin))
3519 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3520 type); })
3521 (if (real_compare (GT_EXPR, cst, &imax))
3522 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3523 type); })
3524 /* Remove cast if CST is an integer representable by ITYPE. */
3525 (if (cst_int_p)
3526 (cmp @0 { gcc_assert (!overflow_p);
3527 wide_int_to_tree (itype, icst_val); })
3528 )
3529 /* When CST is fractional, optimize
3530 (FTYPE) N == CST -> 0
3531 (FTYPE) N != CST -> 1. */
3532 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
03cc70b5 3533 { constant_boolean_node (cmp == NE_EXPR, type); })
c779bea5
YG
3534 /* Otherwise replace with sensible integer constant. */
3535 (with
3536 {
3537 gcc_checking_assert (!overflow_p);
3538 }
3539 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
3540
40fd269a
MG
3541/* Fold A /[ex] B CMP C to A CMP B * C. */
3542(for cmp (eq ne)
3543 (simplify
3544 (cmp (exact_div @0 @1) INTEGER_CST@2)
3545 (if (!integer_zerop (@1))
8e6cdc90 3546 (if (wi::to_wide (@2) == 0)
40fd269a
MG
3547 (cmp @0 @2)
3548 (if (TREE_CODE (@1) == INTEGER_CST)
3549 (with
3550 {
4a669ac3 3551 wi::overflow_type ovf;
8e6cdc90
RS
3552 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3553 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
40fd269a
MG
3554 }
3555 (if (ovf)
3556 { constant_boolean_node (cmp == NE_EXPR, type); }
3557 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3558(for cmp (lt le gt ge)
3559 (simplify
3560 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90 3561 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
40fd269a
MG
3562 (with
3563 {
4a669ac3 3564 wi::overflow_type ovf;
8e6cdc90
RS
3565 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3566 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
40fd269a
MG
3567 }
3568 (if (ovf)
8e6cdc90
RS
3569 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3570 TYPE_SIGN (TREE_TYPE (@2)))
40fd269a
MG
3571 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3572 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
3573
cfdc4f33
MG
3574/* Unordered tests if either argument is a NaN. */
3575(simplify
3576 (bit_ior (unordered @0 @0) (unordered @1 @1))
aea417d7 3577 (if (types_match (@0, @1))
cfdc4f33 3578 (unordered @0 @1)))
257b01ba
MG
3579(simplify
3580 (bit_and (ordered @0 @0) (ordered @1 @1))
3581 (if (types_match (@0, @1))
3582 (ordered @0 @1)))
cfdc4f33
MG
3583(simplify
3584 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3585 @2)
257b01ba
MG
3586(simplify
3587 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3588 @2)
e18c1d66 3589
90c6f26c
RB
3590/* Simple range test simplifications. */
3591/* A < B || A >= B -> true. */
5d30c58d
RB
3592(for test1 (lt le le le ne ge)
3593 test2 (ge gt ge ne eq ne)
90c6f26c
RB
3594 (simplify
3595 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3596 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3597 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3598 { constant_boolean_node (true, type); })))
3599/* A < B && A >= B -> false. */
3600(for test1 (lt lt lt le ne eq)
3601 test2 (ge gt eq gt eq gt)
3602 (simplify
3603 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3604 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3605 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3606 { constant_boolean_node (false, type); })))
3607
9ebc3467
YG
3608/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3609 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3610
3611 Note that comparisons
3612 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3613 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3614 will be canonicalized to above so there's no need to
3615 consider them here.
3616 */
3617
3618(for cmp (le gt)
3619 eqcmp (eq ne)
3620 (simplify
3621 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3622 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3623 (with
3624 {
3625 tree ty = TREE_TYPE (@0);
3626 unsigned prec = TYPE_PRECISION (ty);
3627 wide_int mask = wi::to_wide (@2, prec);
3628 wide_int rhs = wi::to_wide (@3, prec);
3629 signop sgn = TYPE_SIGN (ty);
3630 }
3631 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3632 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3633 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3634 { build_zero_cst (ty); }))))))
3635
534bd33b
MG
3636/* -A CMP -B -> B CMP A. */
3637(for cmp (tcc_comparison)
3638 scmp (swapped_tcc_comparison)
3639 (simplify
3640 (cmp (negate @0) (negate @1))
3641 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3642 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3643 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3644 (scmp @0 @1)))
3645 (simplify
3646 (cmp (negate @0) CONSTANT_CLASS_P@1)
3647 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3648 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3649 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
23f27839 3650 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
534bd33b
MG
3651 (if (tem && !TREE_OVERFLOW (tem))
3652 (scmp @0 { tem; }))))))
3653
b0eb889b
MG
3654/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3655(for op (eq ne)
3656 (simplify
3657 (op (abs @0) zerop@1)
3658 (op @0 @1)))
3659
6358a676
MG
3660/* From fold_sign_changed_comparison and fold_widened_comparison.
3661 FIXME: the lack of symmetry is disturbing. */
79d4f7c6
RB
3662(for cmp (simple_comparison)
3663 (simplify
3664 (cmp (convert@0 @00) (convert?@1 @10))
452ec2a5 3665 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
79d4f7c6
RB
3666 /* Disable this optimization if we're casting a function pointer
3667 type on targets that require function pointer canonicalization. */
3668 && !(targetm.have_canonicalize_funcptr_for_compare ()
400bc526
JDA
3669 && ((POINTER_TYPE_P (TREE_TYPE (@00))
3670 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
3671 || (POINTER_TYPE_P (TREE_TYPE (@10))
3672 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
2fde61e3 3673 && single_use (@0))
79d4f7c6
RB
3674 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3675 && (TREE_CODE (@10) == INTEGER_CST
6358a676 3676 || @1 != @10)
79d4f7c6
RB
3677 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3678 || cmp == NE_EXPR
3679 || cmp == EQ_EXPR)
6358a676 3680 && !POINTER_TYPE_P (TREE_TYPE (@00)))
79d4f7c6
RB
3681 /* ??? The special-casing of INTEGER_CST conversion was in the original
3682 code and here to avoid a spurious overflow flag on the resulting
3683 constant which fold_convert produces. */
3684 (if (TREE_CODE (@1) == INTEGER_CST)
3685 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3686 TREE_OVERFLOW (@1)); })
3687 (cmp @00 (convert @1)))
3688
3689 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3690 /* If possible, express the comparison in the shorter mode. */
3691 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
7fd82d52
PP
3692 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3693 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3694 && TYPE_UNSIGNED (TREE_TYPE (@00))))
79d4f7c6
RB
3695 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3696 || ((TYPE_PRECISION (TREE_TYPE (@00))
3697 >= TYPE_PRECISION (TREE_TYPE (@10)))
3698 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3699 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3700 || (TREE_CODE (@10) == INTEGER_CST
f6c15759 3701 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
3702 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3703 (cmp @00 (convert @10))
3704 (if (TREE_CODE (@10) == INTEGER_CST
f6c15759 3705 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
3706 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3707 (with
3708 {
3709 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3710 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3711 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3712 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3713 }
3714 (if (above || below)
3715 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3716 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3717 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3718 { constant_boolean_node (above ? true : false, type); }
3719 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3720 { constant_boolean_node (above ? false : true, type); }))))))))))))
66e1cacf 3721
96a111a3
RB
3722(for cmp (eq ne)
3723 /* A local variable can never be pointed to by
3724 the default SSA name of an incoming parameter.
3725 SSA names are canonicalized to 2nd place. */
3726 (simplify
3727 (cmp addr@0 SSA_NAME@1)
3728 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3729 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3730 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3731 (if (TREE_CODE (base) == VAR_DECL
3732 && auto_var_in_fn_p (base, current_function_decl))
3733 (if (cmp == NE_EXPR)
3734 { constant_boolean_node (true, type); }
3735 { constant_boolean_node (false, type); }))))))
3736
66e1cacf
RB
3737/* Equality compare simplifications from fold_binary */
3738(for cmp (eq ne)
3739
3740 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3741 Similarly for NE_EXPR. */
3742 (simplify
3743 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3744 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
8e6cdc90 3745 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
66e1cacf
RB
3746 { constant_boolean_node (cmp == NE_EXPR, type); }))
3747
3748 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3749 (simplify
3750 (cmp (bit_xor @0 @1) integer_zerop)
3751 (cmp @0 @1))
3752
3753 /* (X ^ Y) == Y becomes X == 0.
3754 Likewise (X ^ Y) == X becomes Y == 0. */
3755 (simplify
99e943a2 3756 (cmp:c (bit_xor:c @0 @1) @0)
66e1cacf
RB
3757 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3758
3759 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3760 (simplify
3761 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3762 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
d057c866 3763 (cmp @0 (bit_xor @1 (convert @2)))))
d057c866
RB
3764
3765 (simplify
3766 (cmp (convert? addr@0) integer_zerop)
3767 (if (tree_single_nonzero_warnv_p (@0, NULL))
3768 { constant_boolean_node (cmp == NE_EXPR, type); })))
3769
b0eb889b
MG
3770/* If we have (A & C) == C where C is a power of 2, convert this into
3771 (A & C) != 0. Similarly for NE_EXPR. */
3772(for cmp (eq ne)
3773 icmp (ne eq)
3774 (simplify
3775 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3776 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
03cc70b5 3777
519e0faa
PB
3778/* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3779 convert this into a shift followed by ANDing with D. */
3780(simplify
3781 (cond
3782 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
9e61e48e
JJ
3783 INTEGER_CST@2 integer_zerop)
3784 (if (integer_pow2p (@2))
3785 (with {
3786 int shift = (wi::exact_log2 (wi::to_wide (@2))
3787 - wi::exact_log2 (wi::to_wide (@1)));
3788 }
3789 (if (shift > 0)
3790 (bit_and
3791 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3792 (bit_and
3793 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
3794 @2)))))
519e0faa 3795
b0eb889b
MG
3796/* If we have (A & C) != 0 where C is the sign bit of A, convert
3797 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3798(for cmp (eq ne)
3799 ncmp (ge lt)
3800 (simplify
3801 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3802 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2be65d9e 3803 && type_has_mode_precision_p (TREE_TYPE (@0))
b0eb889b 3804 && element_precision (@2) >= element_precision (@0)
8e6cdc90 3805 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
b0eb889b
MG
3806 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3807 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3808
519e0faa 3809/* If we have A < 0 ? C : 0 where C is a power of 2, convert
c0140e3c 3810 this into a right shift or sign extension followed by ANDing with C. */
519e0faa
PB
3811(simplify
3812 (cond
3813 (lt @0 integer_zerop)
9e61e48e
JJ
3814 INTEGER_CST@1 integer_zerop)
3815 (if (integer_pow2p (@1)
3816 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
c0140e3c 3817 (with {
8e6cdc90 3818 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
c0140e3c
JJ
3819 }
3820 (if (shift >= 0)
3821 (bit_and
3822 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3823 @1)
3824 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
3825 sign extension followed by AND with C will achieve the effect. */
3826 (bit_and (convert @0) @1)))))
519e0faa 3827
68aba1f6
RB
3828/* When the addresses are not directly of decls compare base and offset.
3829 This implements some remaining parts of fold_comparison address
3830 comparisons but still no complete part of it. Still it is good
3831 enough to make fold_stmt not regress when not dispatching to fold_binary. */
3832(for cmp (simple_comparison)
3833 (simplify
f501d5cd 3834 (cmp (convert1?@2 addr@0) (convert2? addr@1))
68aba1f6
RB
3835 (with
3836 {
a90c8804 3837 poly_int64 off0, off1;
68aba1f6
RB
3838 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3839 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3840 if (base0 && TREE_CODE (base0) == MEM_REF)
3841 {
aca52e6f 3842 off0 += mem_ref_offset (base0).force_shwi ();
68aba1f6
RB
3843 base0 = TREE_OPERAND (base0, 0);
3844 }
3845 if (base1 && TREE_CODE (base1) == MEM_REF)
3846 {
aca52e6f 3847 off1 += mem_ref_offset (base1).force_shwi ();
68aba1f6
RB
3848 base1 = TREE_OPERAND (base1, 0);
3849 }
3850 }
da571fda
RB
3851 (if (base0 && base1)
3852 (with
3853 {
aad88aed 3854 int equal = 2;
70f40fea
JJ
3855 /* Punt in GENERIC on variables with value expressions;
3856 the value expressions might point to fields/elements
3857 of other vars etc. */
3858 if (GENERIC
3859 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3860 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3861 ;
3862 else if (decl_in_symtab_p (base0)
3863 && decl_in_symtab_p (base1))
da571fda
RB
3864 equal = symtab_node::get_create (base0)
3865 ->equal_address_to (symtab_node::get_create (base1));
c3bea076
RB
3866 else if ((DECL_P (base0)
3867 || TREE_CODE (base0) == SSA_NAME
3868 || TREE_CODE (base0) == STRING_CST)
3869 && (DECL_P (base1)
3870 || TREE_CODE (base1) == SSA_NAME
3871 || TREE_CODE (base1) == STRING_CST))
aad88aed 3872 equal = (base0 == base1);
da571fda 3873 }
3fccbb9e
JJ
3874 (if (equal == 1
3875 && (cmp == EQ_EXPR || cmp == NE_EXPR
3876 /* If the offsets are equal we can ignore overflow. */
3877 || known_eq (off0, off1)
3878 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3879 /* Or if we compare using pointers to decls or strings. */
3880 || (POINTER_TYPE_P (TREE_TYPE (@2))
3881 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
da571fda 3882 (switch
a90c8804
RS
3883 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3884 { constant_boolean_node (known_eq (off0, off1), type); })
3885 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3886 { constant_boolean_node (known_ne (off0, off1), type); })
3887 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
3888 { constant_boolean_node (known_lt (off0, off1), type); })
3889 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
3890 { constant_boolean_node (known_le (off0, off1), type); })
3891 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
3892 { constant_boolean_node (known_ge (off0, off1), type); })
3893 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
3894 { constant_boolean_node (known_gt (off0, off1), type); }))
da571fda
RB
3895 (if (equal == 0
3896 && DECL_P (base0) && DECL_P (base1)
3897 /* If we compare this as integers require equal offset. */
3898 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
a90c8804 3899 || known_eq (off0, off1)))
da571fda
RB
3900 (switch
3901 (if (cmp == EQ_EXPR)
3902 { constant_boolean_node (false, type); })
3903 (if (cmp == NE_EXPR)
3904 { constant_boolean_node (true, type); })))))))))
66e1cacf 3905
98998245
RB
3906/* Simplify pointer equality compares using PTA. */
3907(for neeq (ne eq)
3908 (simplify
3909 (neeq @0 @1)
3910 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3911 && ptrs_compare_unequal (@0, @1))
f913ff2a 3912 { constant_boolean_node (neeq != EQ_EXPR, type); })))
98998245 3913
8f63caf6 3914/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
467719fb
PK
3915 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3916 Disable the transform if either operand is pointer to function.
3917 This broke pr22051-2.c for arm where function pointer
3918 canonicalizaion is not wanted. */
1c0a8806 3919
8f63caf6
RB
3920(for cmp (ne eq)
3921 (simplify
3922 (cmp (convert @0) INTEGER_CST@1)
f53e7e13
JJ
3923 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
3924 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3925 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3926 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3927 && POINTER_TYPE_P (TREE_TYPE (@1))
3928 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
3929 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
8f63caf6
RB
3930 (cmp @0 (convert @1)))))
3931
21aacde4
RB
3932/* Non-equality compare simplifications from fold_binary */
3933(for cmp (lt gt le ge)
3934 /* Comparisons with the highest or lowest possible integer of
3935 the specified precision will have known values. */
3936 (simplify
3937 (cmp (convert?@2 @0) INTEGER_CST@1)
3938 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3939 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3940 (with
3941 {
3942 tree arg1_type = TREE_TYPE (@1);
3943 unsigned int prec = TYPE_PRECISION (arg1_type);
3944 wide_int max = wi::max_value (arg1_type);
3945 wide_int signed_max = wi::max_value (prec, SIGNED);
3946 wide_int min = wi::min_value (arg1_type);
3947 }
3948 (switch
8e6cdc90 3949 (if (wi::to_wide (@1) == max)
21aacde4
RB
3950 (switch
3951 (if (cmp == GT_EXPR)
3952 { constant_boolean_node (false, type); })
3953 (if (cmp == GE_EXPR)
3954 (eq @2 @1))
3955 (if (cmp == LE_EXPR)
3956 { constant_boolean_node (true, type); })
3957 (if (cmp == LT_EXPR)
3958 (ne @2 @1))))
8e6cdc90 3959 (if (wi::to_wide (@1) == min)
21aacde4
RB
3960 (switch
3961 (if (cmp == LT_EXPR)
3962 { constant_boolean_node (false, type); })
3963 (if (cmp == LE_EXPR)
3964 (eq @2 @1))
3965 (if (cmp == GE_EXPR)
3966 { constant_boolean_node (true, type); })
3967 (if (cmp == GT_EXPR)
3968 (ne @2 @1))))
8e6cdc90 3969 (if (wi::to_wide (@1) == max - 1)
9bc22d19
RB
3970 (switch
3971 (if (cmp == GT_EXPR)
8e6cdc90 3972 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))
9bc22d19 3973 (if (cmp == LE_EXPR)
8e6cdc90
RS
3974 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
3975 (if (wi::to_wide (@1) == min + 1)
21aacde4
RB
3976 (switch
3977 (if (cmp == GE_EXPR)
8e6cdc90 3978 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))
21aacde4 3979 (if (cmp == LT_EXPR)
8e6cdc90
RS
3980 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
3981 (if (wi::to_wide (@1) == signed_max
21aacde4
RB
3982 && TYPE_UNSIGNED (arg1_type)
3983 /* We will flip the signedness of the comparison operator
3984 associated with the mode of @1, so the sign bit is
3985 specified by this mode. Check that @1 is the signed
3986 max associated with this sign bit. */
7a504f33 3987 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
21aacde4
RB
3988 /* signed_type does not work on pointer types. */
3989 && INTEGRAL_TYPE_P (arg1_type))
3990 /* The following case also applies to X < signed_max+1
3991 and X >= signed_max+1 because previous transformations. */
3992 (if (cmp == LE_EXPR || cmp == GT_EXPR)
3993 (with { tree st = signed_type_for (arg1_type); }
3994 (if (cmp == LE_EXPR)
3995 (ge (convert:st @0) { build_zero_cst (st); })
3996 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
03cc70b5 3997
b5d3d787
RB
3998(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
3999 /* If the second operand is NaN, the result is constant. */
4000 (simplify
4001 (cmp @0 REAL_CST@1)
4002 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4003 && (cmp != LTGT_EXPR || ! flag_trapping_math))
50301115 4004 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
b5d3d787 4005 ? false : true, type); })))
21aacde4 4006
55cf3946
RB
4007/* bool_var != 0 becomes bool_var. */
4008(simplify
b5d3d787 4009 (ne @0 integer_zerop)
55cf3946
RB
4010 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4011 && types_match (type, TREE_TYPE (@0)))
4012 (non_lvalue @0)))
4013/* bool_var == 1 becomes bool_var. */
4014(simplify
b5d3d787 4015 (eq @0 integer_onep)
55cf3946
RB
4016 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4017 && types_match (type, TREE_TYPE (@0)))
4018 (non_lvalue @0)))
b5d3d787
RB
4019/* Do not handle
4020 bool_var == 0 becomes !bool_var or
4021 bool_var != 1 becomes !bool_var
4022 here because that only is good in assignment context as long
4023 as we require a tcc_comparison in GIMPLE_CONDs where we'd
4024 replace if (x == 0) with tem = ~x; if (tem != 0) which is
4025 clearly less optimal and which we'll transform again in forwprop. */
55cf3946 4026
ca1206be
MG
4027/* When one argument is a constant, overflow detection can be simplified.
4028 Currently restricted to single use so as not to interfere too much with
4029 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4030 A + CST CMP A -> A CMP' CST' */
4031(for cmp (lt le ge gt)
4032 out (gt gt le le)
4033 (simplify
a8e9f9a3 4034 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
ca1206be
MG
4035 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4036 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
8e6cdc90 4037 && wi::to_wide (@1) != 0
ca1206be 4038 && single_use (@2))
8e6cdc90
RS
4039 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4040 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4041 wi::max_value (prec, UNSIGNED)
4042 - wi::to_wide (@1)); })))))
ca1206be 4043
3563f78f
MG
4044/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4045 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4046 expects the long form, so we restrict the transformation for now. */
4047(for cmp (gt le)
4048 (simplify
a8e9f9a3 4049 (cmp:c (minus@2 @0 @1) @0)
3563f78f
MG
4050 (if (single_use (@2)
4051 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4052 && TYPE_UNSIGNED (TREE_TYPE (@0))
4053 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4054 (cmp @1 @0))))
3563f78f
MG
4055
4056/* Testing for overflow is unnecessary if we already know the result. */
3563f78f
MG
4057/* A - B > A */
4058(for cmp (gt le)
4059 out (ne eq)
4060 (simplify
a8e9f9a3 4061 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3563f78f
MG
4062 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4063 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4064 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4065/* A + B < A */
4066(for cmp (lt ge)
4067 out (ne eq)
4068 (simplify
a8e9f9a3 4069 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3563f78f
MG
4070 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4071 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4072 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4073
603aeb87 4074/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
0557293f 4075 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
0557293f
AM
4076(for cmp (lt ge)
4077 out (ne eq)
4078 (simplify
603aeb87 4079 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
0557293f
AM
4080 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4081 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4082 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
55cf3946 4083
53f3cd25
RS
4084/* Simplification of math builtins. These rules must all be optimizations
4085 as well as IL simplifications. If there is a possibility that the new
4086 form could be a pessimization, the rule should go in the canonicalization
4087 section that follows this one.
e18c1d66 4088
53f3cd25
RS
4089 Rules can generally go in this section if they satisfy one of
4090 the following:
4091
4092 - the rule describes an identity
4093
4094 - the rule replaces calls with something as simple as addition or
4095 multiplication
4096
4097 - the rule contains unary calls only and simplifies the surrounding
4098 arithmetic. (The idea here is to exclude non-unary calls in which
4099 one operand is constant and in which the call is known to be cheap
4100 when the operand has that value.) */
52c6378a 4101
53f3cd25 4102(if (flag_unsafe_math_optimizations)
52c6378a
N
4103 /* Simplify sqrt(x) * sqrt(x) -> x. */
4104 (simplify
c6cfa2bf 4105 (mult (SQRT_ALL@1 @0) @1)
52c6378a
N
4106 (if (!HONOR_SNANS (type))
4107 @0))
4108
ed17cb57
JW
4109 (for op (plus minus)
4110 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
4111 (simplify
4112 (op (rdiv @0 @1)
4113 (rdiv @2 @1))
4114 (rdiv (op @0 @2) @1)))
4115
5e21d765
WD
4116 (for cmp (lt le gt ge)
4117 neg_cmp (gt ge lt le)
4118 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
4119 (simplify
4120 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
4121 (with
4122 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
4123 (if (tem
4124 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
4125 || (real_zerop (tem) && !real_zerop (@1))))
4126 (switch
4127 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
4128 (cmp @0 { tem; }))
4129 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
4130 (neg_cmp @0 { tem; })))))))
4131
35401640
N
4132 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
4133 (for root (SQRT CBRT)
4134 (simplify
4135 (mult (root:s @0) (root:s @1))
4136 (root (mult @0 @1))))
4137
35401640
N
4138 /* Simplify expN(x) * expN(y) -> expN(x+y). */
4139 (for exps (EXP EXP2 EXP10 POW10)
4140 (simplify
4141 (mult (exps:s @0) (exps:s @1))
4142 (exps (plus @0 @1))))
4143
52c6378a 4144 /* Simplify a/root(b/c) into a*root(c/b). */
35401640
N
4145 (for root (SQRT CBRT)
4146 (simplify
4147 (rdiv @0 (root:s (rdiv:s @1 @2)))
4148 (mult @0 (root (rdiv @2 @1)))))
4149
4150 /* Simplify x/expN(y) into x*expN(-y). */
4151 (for exps (EXP EXP2 EXP10 POW10)
4152 (simplify
4153 (rdiv @0 (exps:s @1))
4154 (mult @0 (exps (negate @1)))))
52c6378a 4155
eee7b6c4
RB
4156 (for logs (LOG LOG2 LOG10 LOG10)
4157 exps (EXP EXP2 EXP10 POW10)
8acda9b2 4158 /* logN(expN(x)) -> x. */
e18c1d66
RB
4159 (simplify
4160 (logs (exps @0))
8acda9b2
RS
4161 @0)
4162 /* expN(logN(x)) -> x. */
4163 (simplify
4164 (exps (logs @0))
4165 @0))
53f3cd25 4166
e18c1d66
RB
4167 /* Optimize logN(func()) for various exponential functions. We
4168 want to determine the value "x" and the power "exponent" in
4169 order to transform logN(x**exponent) into exponent*logN(x). */
eee7b6c4
RB
4170 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
4171 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
e18c1d66
RB
4172 (simplify
4173 (logs (exps @0))
c9e926ce
RS
4174 (if (SCALAR_FLOAT_TYPE_P (type))
4175 (with {
4176 tree x;
4177 switch (exps)
4178 {
4179 CASE_CFN_EXP:
4180 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
4181 x = build_real_truncate (type, dconst_e ());
4182 break;
4183 CASE_CFN_EXP2:
4184 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
4185 x = build_real (type, dconst2);
4186 break;
4187 CASE_CFN_EXP10:
4188 CASE_CFN_POW10:
4189 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
4190 {
4191 REAL_VALUE_TYPE dconst10;
4192 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4193 x = build_real (type, dconst10);
4194 }
4195 break;
4196 default:
4197 gcc_unreachable ();
4198 }
4199 }
4200 (mult (logs { x; }) @0)))))
53f3cd25 4201
e18c1d66
RB
4202 (for logs (LOG LOG
4203 LOG2 LOG2
4204 LOG10 LOG10)
4205 exps (SQRT CBRT)
4206 (simplify
4207 (logs (exps @0))
c9e926ce
RS
4208 (if (SCALAR_FLOAT_TYPE_P (type))
4209 (with {
4210 tree x;
4211 switch (exps)
4212 {
4213 CASE_CFN_SQRT:
4214 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
4215 x = build_real (type, dconsthalf);
4216 break;
4217 CASE_CFN_CBRT:
4218 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
4219 x = build_real_truncate (type, dconst_third ());
4220 break;
4221 default:
4222 gcc_unreachable ();
4223 }
4224 }
4225 (mult { x; } (logs @0))))))
53f3cd25
RS
4226
4227 /* logN(pow(x,exponent)) -> exponent*logN(x). */
e18c1d66
RB
4228 (for logs (LOG LOG2 LOG10)
4229 pows (POW)
4230 (simplify
4231 (logs (pows @0 @1))
53f3cd25
RS
4232 (mult @1 (logs @0))))
4233
848bb6fc
JJ
4234 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4235 or if C is a positive power of 2,
4236 pow(C,x) -> exp2(log2(C)*x). */
30a2c10e 4237#if GIMPLE
e83fe013
WD
4238 (for pows (POW)
4239 exps (EXP)
4240 logs (LOG)
848bb6fc
JJ
4241 exp2s (EXP2)
4242 log2s (LOG2)
e83fe013
WD
4243 (simplify
4244 (pows REAL_CST@0 @1)
848bb6fc 4245 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
ef7866a3
JJ
4246 && real_isfinite (TREE_REAL_CST_PTR (@0))
4247 /* As libmvec doesn't have a vectorized exp2, defer optimizing
4248 the use_exp2 case until after vectorization. It seems actually
4249 beneficial for all constants to postpone this until later,
4250 because exp(log(C)*x), while faster, will have worse precision
4251 and if x folds into a constant too, that is unnecessary
4252 pessimization. */
4253 && canonicalize_math_after_vectorization_p ())
848bb6fc
JJ
4254 (with {
4255 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4256 bool use_exp2 = false;
4257 if (targetm.libc_has_function (function_c99_misc)
4258 && value->cl == rvc_normal)
4259 {
4260 REAL_VALUE_TYPE frac_rvt = *value;
4261 SET_REAL_EXP (&frac_rvt, 1);
4262 if (real_equal (&frac_rvt, &dconst1))
4263 use_exp2 = true;
4264 }
4265 }
4266 (if (!use_exp2)
30a2c10e
JJ
4267 (if (optimize_pow_to_exp (@0, @1))
4268 (exps (mult (logs @0) @1)))
ef7866a3 4269 (exp2s (mult (log2s @0) @1)))))))
30a2c10e 4270#endif
e83fe013 4271
16ef0a8c
JJ
4272 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
4273 (for pows (POW)
4274 exps (EXP EXP2 EXP10 POW10)
4275 logs (LOG LOG2 LOG10 LOG10)
4276 (simplify
4277 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4278 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4279 && real_isfinite (TREE_REAL_CST_PTR (@0)))
4280 (exps (plus (mult (logs @0) @1) @2)))))
4281
53f3cd25
RS
4282 (for sqrts (SQRT)
4283 cbrts (CBRT)
b4838d77 4284 pows (POW)
53f3cd25
RS
4285 exps (EXP EXP2 EXP10 POW10)
4286 /* sqrt(expN(x)) -> expN(x*0.5). */
4287 (simplify
4288 (sqrts (exps @0))
4289 (exps (mult @0 { build_real (type, dconsthalf); })))
4290 /* cbrt(expN(x)) -> expN(x/3). */
4291 (simplify
4292 (cbrts (exps @0))
b4838d77
RS
4293 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4294 /* pow(expN(x), y) -> expN(x*y). */
4295 (simplify
4296 (pows (exps @0) @1)
4297 (exps (mult @0 @1))))
cfed37a0
RS
4298
4299 /* tan(atan(x)) -> x. */
4300 (for tans (TAN)
4301 atans (ATAN)
4302 (simplify
4303 (tans (atans @0))
4304 @0)))
53f3cd25 4305
121ef08b
GB
4306 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
4307 (for sins (SIN)
4308 atans (ATAN)
4309 sqrts (SQRT)
4310 copysigns (COPYSIGN)
4311 (simplify
4312 (sins (atans:s @0))
4313 (with
4314 {
4315 REAL_VALUE_TYPE r_cst;
4316 build_sinatan_real (&r_cst, type);
4317 tree t_cst = build_real (type, r_cst);
4318 tree t_one = build_one_cst (type);
4319 }
4320 (if (SCALAR_FLOAT_TYPE_P (type))
4321 (cond (le (abs @0) { t_cst; })
4322 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
4323 (copysigns { t_one; } @0))))))
4324
4325/* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
4326 (for coss (COS)
4327 atans (ATAN)
4328 sqrts (SQRT)
4329 copysigns (COPYSIGN)
4330 (simplify
4331 (coss (atans:s @0))
4332 (with
4333 {
4334 REAL_VALUE_TYPE r_cst;
4335 build_sinatan_real (&r_cst, type);
4336 tree t_cst = build_real (type, r_cst);
4337 tree t_one = build_one_cst (type);
4338 tree t_zero = build_zero_cst (type);
4339 }
4340 (if (SCALAR_FLOAT_TYPE_P (type))
4341 (cond (le (abs @0) { t_cst; })
4342 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
4343 (copysigns { t_zero; } @0))))))
4344
abcc43f5
RS
4345/* cabs(x+0i) or cabs(0+xi) -> abs(x). */
4346(simplify
e04d2a35 4347 (CABS (complex:C @0 real_zerop@1))
abcc43f5
RS
4348 (abs @0))
4349
67dbe582 4350/* trunc(trunc(x)) -> trunc(x), etc. */
c6cfa2bf 4351(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
67dbe582
RS
4352 (simplify
4353 (fns (fns @0))
4354 (fns @0)))
4355/* f(x) -> x if x is integer valued and f does nothing for such values. */
c6cfa2bf 4356(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
67dbe582
RS
4357 (simplify
4358 (fns integer_valued_real_p@0)
4359 @0))
67dbe582 4360
4d7836c4
RS
4361/* hypot(x,0) and hypot(0,x) -> abs(x). */
4362(simplify
c9e926ce 4363 (HYPOT:c @0 real_zerop@1)
4d7836c4
RS
4364 (abs @0))
4365
b4838d77
RS
4366/* pow(1,x) -> 1. */
4367(simplify
4368 (POW real_onep@0 @1)
4369 @0)
4370
461e4145
RS
4371(simplify
4372 /* copysign(x,x) -> x. */
c6cfa2bf 4373 (COPYSIGN_ALL @0 @0)
461e4145
RS
4374 @0)
4375
4376(simplify
4377 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
c6cfa2bf 4378 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
461e4145
RS
4379 (abs @0))
4380
86c0733f
RS
4381(for scale (LDEXP SCALBN SCALBLN)
4382 /* ldexp(0, x) -> 0. */
4383 (simplify
4384 (scale real_zerop@0 @1)
4385 @0)
4386 /* ldexp(x, 0) -> x. */
4387 (simplify
4388 (scale @0 integer_zerop@1)
4389 @0)
4390 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
4391 (simplify
4392 (scale REAL_CST@0 @1)
4393 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
4394 @0)))
4395
53f3cd25
RS
4396/* Canonicalization of sequences of math builtins. These rules represent
4397 IL simplifications but are not necessarily optimizations.
4398
4399 The sincos pass is responsible for picking "optimal" implementations
4400 of math builtins, which may be more complicated and can sometimes go
4401 the other way, e.g. converting pow into a sequence of sqrts.
4402 We only want to do these canonicalizations before the pass has run. */
4403
4404(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4405 /* Simplify tan(x) * cos(x) -> sin(x). */
4406 (simplify
4407 (mult:c (TAN:s @0) (COS:s @0))
4408 (SIN @0))
4409
4410 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4411 (simplify
de3fbea3 4412 (mult:c @0 (POW:s @0 REAL_CST@1))
53f3cd25
RS
4413 (if (!TREE_OVERFLOW (@1))
4414 (POW @0 (plus @1 { build_one_cst (type); }))))
4415
4416 /* Simplify sin(x) / cos(x) -> tan(x). */
4417 (simplify
4418 (rdiv (SIN:s @0) (COS:s @0))
4419 (TAN @0))
4420
4421 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4422 (simplify
4423 (rdiv (COS:s @0) (SIN:s @0))
4424 (rdiv { build_one_cst (type); } (TAN @0)))
4425
4426 /* Simplify sin(x) / tan(x) -> cos(x). */
4427 (simplify
4428 (rdiv (SIN:s @0) (TAN:s @0))
4429 (if (! HONOR_NANS (@0)
4430 && ! HONOR_INFINITIES (@0))
c9e926ce 4431 (COS @0)))
53f3cd25
RS
4432
4433 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4434 (simplify
4435 (rdiv (TAN:s @0) (SIN:s @0))
4436 (if (! HONOR_NANS (@0)
4437 && ! HONOR_INFINITIES (@0))
4438 (rdiv { build_one_cst (type); } (COS @0))))
4439
4440 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4441 (simplify
4442 (mult (POW:s @0 @1) (POW:s @0 @2))
4443 (POW @0 (plus @1 @2)))
4444
4445 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4446 (simplify
4447 (mult (POW:s @0 @1) (POW:s @2 @1))
4448 (POW (mult @0 @2) @1))
4449
de3fbea3
RB
4450 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4451 (simplify
4452 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4453 (POWI (mult @0 @2) @1))
4454
53f3cd25
RS
4455 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4456 (simplify
4457 (rdiv (POW:s @0 REAL_CST@1) @0)
4458 (if (!TREE_OVERFLOW (@1))
4459 (POW @0 (minus @1 { build_one_cst (type); }))))
4460
4461 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4462 (simplify
4463 (rdiv @0 (POW:s @1 @2))
4464 (mult @0 (POW @1 (negate @2))))
4465
4466 (for sqrts (SQRT)
4467 cbrts (CBRT)
4468 pows (POW)
4469 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4470 (simplify
4471 (sqrts (sqrts @0))
4472 (pows @0 { build_real (type, dconst_quarter ()); }))
4473 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4474 (simplify
4475 (sqrts (cbrts @0))
4476 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4477 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4478 (simplify
4479 (cbrts (sqrts @0))
4480 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4481 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4482 (simplify
4483 (cbrts (cbrts tree_expr_nonnegative_p@0))
4484 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4485 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4486 (simplify
4487 (sqrts (pows @0 @1))
4488 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4489 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4490 (simplify
4491 (cbrts (pows tree_expr_nonnegative_p@0 @1))
b4838d77
RS
4492 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4493 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4494 (simplify
4495 (pows (sqrts @0) @1)
4496 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4497 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4498 (simplify
4499 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4500 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4501 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4502 (simplify
4503 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4504 (pows @0 (mult @1 @2))))
abcc43f5
RS
4505
4506 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4507 (simplify
4508 (CABS (complex @0 @0))
96285749
RS
4509 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4510
4d7836c4
RS
4511 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4512 (simplify
4513 (HYPOT @0 @0)
4514 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4515
96285749
RS
4516 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4517 (for cexps (CEXP)
4518 exps (EXP)
4519 cexpis (CEXPI)
4520 (simplify
4521 (cexps compositional_complex@0)
4522 (if (targetm.libc_has_function (function_c99_math_complex))
4523 (complex
4524 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4525 (mult @1 (imagpart @2)))))))
e18c1d66 4526
67dbe582
RS
4527(if (canonicalize_math_p ())
4528 /* floor(x) -> trunc(x) if x is nonnegative. */
c6cfa2bf
MM
4529 (for floors (FLOOR_ALL)
4530 truncs (TRUNC_ALL)
67dbe582
RS
4531 (simplify
4532 (floors tree_expr_nonnegative_p@0)
4533 (truncs @0))))
4534
4535(match double_value_p
4536 @0
4537 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4538(for froms (BUILT_IN_TRUNCL
4539 BUILT_IN_FLOORL
4540 BUILT_IN_CEILL
4541 BUILT_IN_ROUNDL
4542 BUILT_IN_NEARBYINTL
4543 BUILT_IN_RINTL)
4544 tos (BUILT_IN_TRUNC
4545 BUILT_IN_FLOOR
4546 BUILT_IN_CEIL
4547 BUILT_IN_ROUND
4548 BUILT_IN_NEARBYINT
4549 BUILT_IN_RINT)
4550 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4551 (if (optimize && canonicalize_math_p ())
4552 (simplify
4553 (froms (convert double_value_p@0))
4554 (convert (tos @0)))))
4555
4556(match float_value_p
4557 @0
4558 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4559(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4560 BUILT_IN_FLOORL BUILT_IN_FLOOR
4561 BUILT_IN_CEILL BUILT_IN_CEIL
4562 BUILT_IN_ROUNDL BUILT_IN_ROUND
4563 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4564 BUILT_IN_RINTL BUILT_IN_RINT)
4565 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4566 BUILT_IN_FLOORF BUILT_IN_FLOORF
4567 BUILT_IN_CEILF BUILT_IN_CEILF
4568 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4569 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4570 BUILT_IN_RINTF BUILT_IN_RINTF)
4571 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
4572 if x is a float. */
5dac7dbd
JDA
4573 (if (optimize && canonicalize_math_p ()
4574 && targetm.libc_has_function (function_c99_misc))
67dbe582
RS
4575 (simplify
4576 (froms (convert float_value_p@0))
4577 (convert (tos @0)))))
4578
543a9bcd
RS
4579(for froms (XFLOORL XCEILL XROUNDL XRINTL)
4580 tos (XFLOOR XCEIL XROUND XRINT)
4581 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4582 (if (optimize && canonicalize_math_p ())
4583 (simplify
4584 (froms (convert double_value_p@0))
4585 (tos @0))))
4586
4587(for froms (XFLOORL XCEILL XROUNDL XRINTL
4588 XFLOOR XCEIL XROUND XRINT)
4589 tos (XFLOORF XCEILF XROUNDF XRINTF)
4590 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
4591 if x is a float. */
4592 (if (optimize && canonicalize_math_p ())
4593 (simplify
4594 (froms (convert float_value_p@0))
4595 (tos @0))))
4596
4597(if (canonicalize_math_p ())
4598 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4599 (for floors (IFLOOR LFLOOR LLFLOOR)
4600 (simplify
4601 (floors tree_expr_nonnegative_p@0)
4602 (fix_trunc @0))))
4603
4604(if (canonicalize_math_p ())
4605 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4606 (for fns (IFLOOR LFLOOR LLFLOOR
4607 ICEIL LCEIL LLCEIL
4608 IROUND LROUND LLROUND)
4609 (simplify
4610 (fns integer_valued_real_p@0)
4611 (fix_trunc @0)))
4612 (if (!flag_errno_math)
4613 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4614 (for rints (IRINT LRINT LLRINT)
4615 (simplify
4616 (rints integer_valued_real_p@0)
4617 (fix_trunc @0)))))
4618
4619(if (canonicalize_math_p ())
4620 (for ifn (IFLOOR ICEIL IROUND IRINT)
4621 lfn (LFLOOR LCEIL LROUND LRINT)
4622 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4623 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4624 sizeof (int) == sizeof (long). */
4625 (if (TYPE_PRECISION (integer_type_node)
4626 == TYPE_PRECISION (long_integer_type_node))
4627 (simplify
4628 (ifn @0)
4629 (lfn:long_integer_type_node @0)))
4630 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4631 sizeof (long long) == sizeof (long). */
4632 (if (TYPE_PRECISION (long_long_integer_type_node)
4633 == TYPE_PRECISION (long_integer_type_node))
4634 (simplify
4635 (llfn @0)
4636 (lfn:long_integer_type_node @0)))))
4637
92c52eab
RS
4638/* cproj(x) -> x if we're ignoring infinities. */
4639(simplify
4640 (CPROJ @0)
4641 (if (!HONOR_INFINITIES (type))
4642 @0))
4643
4534c203
RB
4644/* If the real part is inf and the imag part is known to be
4645 nonnegative, return (inf + 0i). */
4646(simplify
4647 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
4648 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
92c52eab
RS
4649 { build_complex_inf (type, false); }))
4650
4534c203
RB
4651/* If the imag part is inf, return (inf+I*copysign(0,imag)). */
4652(simplify
4653 (CPROJ (complex @0 REAL_CST@1))
4654 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
92c52eab 4655 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4534c203 4656
b4838d77
RS
4657(for pows (POW)
4658 sqrts (SQRT)
4659 cbrts (CBRT)
4660 (simplify
4661 (pows @0 REAL_CST@1)
4662 (with {
4663 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
4664 REAL_VALUE_TYPE tmp;
4665 }
4666 (switch
4667 /* pow(x,0) -> 1. */
4668 (if (real_equal (value, &dconst0))
4669 { build_real (type, dconst1); })
4670 /* pow(x,1) -> x. */
4671 (if (real_equal (value, &dconst1))
4672 @0)
4673 /* pow(x,-1) -> 1/x. */
4674 (if (real_equal (value, &dconstm1))
4675 (rdiv { build_real (type, dconst1); } @0))
4676 /* pow(x,0.5) -> sqrt(x). */
4677 (if (flag_unsafe_math_optimizations
4678 && canonicalize_math_p ()
4679 && real_equal (value, &dconsthalf))
4680 (sqrts @0))
4681 /* pow(x,1/3) -> cbrt(x). */
4682 (if (flag_unsafe_math_optimizations
4683 && canonicalize_math_p ()
4684 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4685 real_equal (value, &tmp)))
4686 (cbrts @0))))))
4534c203 4687
5ddc84ca
RS
4688/* powi(1,x) -> 1. */
4689(simplify
4690 (POWI real_onep@0 @1)
4691 @0)
4692
4693(simplify
4694 (POWI @0 INTEGER_CST@1)
4695 (switch
4696 /* powi(x,0) -> 1. */
8e6cdc90 4697 (if (wi::to_wide (@1) == 0)
5ddc84ca
RS
4698 { build_real (type, dconst1); })
4699 /* powi(x,1) -> x. */
8e6cdc90 4700 (if (wi::to_wide (@1) == 1)
5ddc84ca
RS
4701 @0)
4702 /* powi(x,-1) -> 1/x. */
8e6cdc90 4703 (if (wi::to_wide (@1) == -1)
5ddc84ca
RS
4704 (rdiv { build_real (type, dconst1); } @0))))
4705
03cc70b5 4706/* Narrowing of arithmetic and logical operations.
be144838
JL
4707
4708 These are conceptually similar to the transformations performed for
4709 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4710 term we want to move all that code out of the front-ends into here. */
4711
4712/* If we have a narrowing conversion of an arithmetic operation where
4713 both operands are widening conversions from the same type as the outer
4714 narrowing conversion. Then convert the innermost operands to a suitable
9c582551 4715 unsigned type (to avoid introducing undefined behavior), perform the
be144838
JL
4716 operation and convert the result to the desired type. */
4717(for op (plus minus)
4718 (simplify
93f90bec 4719 (convert (op:s (convert@2 @0) (convert?@3 @1)))
be144838
JL
4720 (if (INTEGRAL_TYPE_P (type)
4721 /* We check for type compatibility between @0 and @1 below,
4722 so there's no need to check that @1/@3 are integral types. */
4723 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4724 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4725 /* The precision of the type of each operand must match the
4726 precision of the mode of each operand, similarly for the
4727 result. */
2be65d9e
RS
4728 && type_has_mode_precision_p (TREE_TYPE (@0))
4729 && type_has_mode_precision_p (TREE_TYPE (@1))
4730 && type_has_mode_precision_p (type)
be144838
JL
4731 /* The inner conversion must be a widening conversion. */
4732 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
93f90bec
BC
4733 && types_match (@0, type)
4734 && (types_match (@0, @1)
4735 /* Or the second operand is const integer or converted const
4736 integer from valueize. */
4737 || TREE_CODE (@1) == INTEGER_CST))
be144838 4738 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
93f90bec 4739 (op @0 (convert @1))
8fdc6c67 4740 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
93f90bec
BC
4741 (convert (op (convert:utype @0)
4742 (convert:utype @1))))))))
48451e8f
JL
4743
4744/* This is another case of narrowing, specifically when there's an outer
4745 BIT_AND_EXPR which masks off bits outside the type of the innermost
4746 operands. Like the previous case we have to convert the operands
9c582551 4747 to unsigned types to avoid introducing undefined behavior for the
48451e8f
JL
4748 arithmetic operation. */
4749(for op (minus plus)
8fdc6c67
RB
4750 (simplify
4751 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4752 (if (INTEGRAL_TYPE_P (type)
4753 /* We check for type compatibility between @0 and @1 below,
4754 so there's no need to check that @1/@3 are integral types. */
4755 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4756 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4757 /* The precision of the type of each operand must match the
4758 precision of the mode of each operand, similarly for the
4759 result. */
2be65d9e
RS
4760 && type_has_mode_precision_p (TREE_TYPE (@0))
4761 && type_has_mode_precision_p (TREE_TYPE (@1))
4762 && type_has_mode_precision_p (type)
8fdc6c67
RB
4763 /* The inner conversion must be a widening conversion. */
4764 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4765 && types_match (@0, @1)
4766 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4767 <= TYPE_PRECISION (TREE_TYPE (@0)))
8e6cdc90
RS
4768 && (wi::to_wide (@4)
4769 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4770 true, TYPE_PRECISION (type))) == 0)
8fdc6c67
RB
4771 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4772 (with { tree ntype = TREE_TYPE (@0); }
4773 (convert (bit_and (op @0 @1) (convert:ntype @4))))
4774 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4775 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
4776 (convert:utype @4))))))))
4f7a5692 4777
03cc70b5 4778/* Transform (@0 < @1 and @0 < @2) to use min,
4f7a5692 4779 (@0 > @1 and @0 > @2) to use max */
dac920e8
MG
4780(for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
4781 op (lt le gt ge lt le gt ge )
4782 ext (min min max max max max min min )
4f7a5692 4783 (simplify
dac920e8 4784 (logic (op:cs @0 @1) (op:cs @0 @2))
4618c453
RB
4785 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4786 && TREE_CODE (@0) != INTEGER_CST)
4f7a5692
MC
4787 (op @0 (ext @1 @2)))))
4788
7317ef4a
RS
4789(simplify
4790 /* signbit(x) -> 0 if x is nonnegative. */
4791 (SIGNBIT tree_expr_nonnegative_p@0)
4792 { integer_zero_node; })
4793
4794(simplify
4795 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
4796 (SIGNBIT @0)
4797 (if (!HONOR_SIGNED_ZEROS (@0))
4798 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
a8b85ce9
MG
4799
4800/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
4801(for cmp (eq ne)
4802 (for op (plus minus)
4803 rop (minus plus)
4804 (simplify
4805 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4806 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4807 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
4808 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
4809 && !TYPE_SATURATING (TREE_TYPE (@0)))
4810 (with { tree res = int_const_binop (rop, @2, @1); }
75473a91
RB
4811 (if (TREE_OVERFLOW (res)
4812 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
a8b85ce9
MG
4813 { constant_boolean_node (cmp == NE_EXPR, type); }
4814 (if (single_use (@3))
11c1e63c
JJ
4815 (cmp @0 { TREE_OVERFLOW (res)
4816 ? drop_tree_overflow (res) : res; }))))))))
a8b85ce9
MG
4817(for cmp (lt le gt ge)
4818 (for op (plus minus)
4819 rop (minus plus)
4820 (simplify
4821 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4822 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4823 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4824 (with { tree res = int_const_binop (rop, @2, @1); }
4825 (if (TREE_OVERFLOW (res))
4826 {
4827 fold_overflow_warning (("assuming signed overflow does not occur "
4828 "when simplifying conditional to constant"),
4829 WARN_STRICT_OVERFLOW_CONDITIONAL);
4830 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
4831 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
8e6cdc90
RS
4832 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
4833 TYPE_SIGN (TREE_TYPE (@1)))
a8b85ce9
MG
4834 != (op == MINUS_EXPR);
4835 constant_boolean_node (less == ovf_high, type);
4836 }
4837 (if (single_use (@3))
4838 (with
4839 {
4840 fold_overflow_warning (("assuming signed overflow does not occur "
4841 "when changing X +- C1 cmp C2 to "
4842 "X cmp C2 -+ C1"),
4843 WARN_STRICT_OVERFLOW_COMPARISON);
4844 }
4845 (cmp @0 { res; })))))))))
d3e40b76
RB
4846
4847/* Canonicalizations of BIT_FIELD_REFs. */
4848
6ec96dcb
RB
4849(simplify
4850 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
4851 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
4852
4853(simplify
4854 (BIT_FIELD_REF (view_convert @0) @1 @2)
4855 (BIT_FIELD_REF @0 @1 @2))
4856
4857(simplify
4858 (BIT_FIELD_REF @0 @1 integer_zerop)
4859 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
4860 (view_convert @0)))
4861
d3e40b76
RB
4862(simplify
4863 (BIT_FIELD_REF @0 @1 @2)
4864 (switch
4865 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
4866 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4867 (switch
4868 (if (integer_zerop (@2))
4869 (view_convert (realpart @0)))
4870 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4871 (view_convert (imagpart @0)))))
4872 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4873 && INTEGRAL_TYPE_P (type)
171f6f05
RB
4874 /* On GIMPLE this should only apply to register arguments. */
4875 && (! GIMPLE || is_gimple_reg (@0))
d3e40b76
RB
4876 /* A bit-field-ref that referenced the full argument can be stripped. */
4877 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
4878 && integer_zerop (@2))
4879 /* Low-parts can be reduced to integral conversions.
4880 ??? The following doesn't work for PDP endian. */
4881 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
4882 /* Don't even think about BITS_BIG_ENDIAN. */
4883 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
4884 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
4885 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
4886 ? (TYPE_PRECISION (TREE_TYPE (@0))
4887 - TYPE_PRECISION (type))
4888 : 0)) == 0)))
4889 (convert @0))))
4890
4891/* Simplify vector extracts. */
4892
4893(simplify
4894 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
4895 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
4896 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
4897 || (VECTOR_TYPE_P (type)
4898 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
4899 (with
4900 {
4901 tree ctor = (TREE_CODE (@0) == SSA_NAME
4902 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
4903 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
4904 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
4905 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
4906 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
4907 }
4908 (if (n != 0
4909 && (idx % width) == 0
4910 && (n % width) == 0
928686b1
RS
4911 && known_le ((idx + n) / width,
4912 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
d3e40b76
RB
4913 (with
4914 {
4915 idx = idx / width;
4916 n = n / width;
4917 /* Constructor elements can be subvectors. */
d34457c1 4918 poly_uint64 k = 1;
d3e40b76
RB
4919 if (CONSTRUCTOR_NELTS (ctor) != 0)
4920 {
4921 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
4922 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
4923 k = TYPE_VECTOR_SUBPARTS (cons_elem);
4924 }
d34457c1 4925 unsigned HOST_WIDE_INT elt, count, const_k;
d3e40b76
RB
4926 }
4927 (switch
4928 /* We keep an exact subset of the constructor elements. */
d34457c1 4929 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
d3e40b76
RB
4930 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4931 { build_constructor (type, NULL); }
d34457c1
RS
4932 (if (count == 1)
4933 (if (elt < CONSTRUCTOR_NELTS (ctor))
4c1da8ea 4934 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
d34457c1 4935 { build_zero_cst (type); })
d3e40b76 4936 {
d34457c1
RS
4937 vec<constructor_elt, va_gc> *vals;
4938 vec_alloc (vals, count);
4939 for (unsigned i = 0;
4940 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
4941 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
4942 CONSTRUCTOR_ELT (ctor, elt + i)->value);
4943 build_constructor (type, vals);
4944 })))
d3e40b76 4945 /* The bitfield references a single constructor element. */
d34457c1
RS
4946 (if (k.is_constant (&const_k)
4947 && idx + n <= (idx / const_k + 1) * const_k)
d3e40b76 4948 (switch
d34457c1 4949 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
d3e40b76 4950 { build_zero_cst (type); })
d34457c1 4951 (if (n == const_k)
4c1da8ea 4952 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
d34457c1
RS
4953 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
4954 @1 { bitsize_int ((idx % const_k) * width); })))))))))
92e29a5e
RB
4955
4956/* Simplify a bit extraction from a bit insertion for the cases with
4957 the inserted element fully covering the extraction or the insertion
4958 not touching the extraction. */
4959(simplify
4960 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
4961 (with
4962 {
4963 unsigned HOST_WIDE_INT isize;
4964 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4965 isize = TYPE_PRECISION (TREE_TYPE (@1));
4966 else
4967 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
4968 }
4969 (switch
8e6cdc90
RS
4970 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
4971 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
4972 wi::to_wide (@ipos) + isize))
92e29a5e 4973 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
8e6cdc90
RS
4974 wi::to_wide (@rpos)
4975 - wi::to_wide (@ipos)); }))
4976 (if (wi::geu_p (wi::to_wide (@ipos),
4977 wi::to_wide (@rpos) + wi::to_wide (@rsize))
4978 || wi::geu_p (wi::to_wide (@rpos),
4979 wi::to_wide (@ipos) + isize))
92e29a5e 4980 (BIT_FIELD_REF @0 @rsize @rpos)))))
c566cc9f 4981
c453ccc2
RS
/* Fold negations into the fused multiply-add internal functions, using
   the conventions visible in the folds below:
     FMA  (a, b, c) =  a * b + c
     FNMA (a, b, c) = -a * b + c
     FMS  (a, b, c) =  a * b - c
     FNMS (a, b, c) = -a * b - c
   Each group handles: negated first multiplicand, negated addend,
   both negated, and negation of the whole result.  The result-negation
   folds are guarded by single_use so we do not duplicate the multiply-add
   when the un-negated value is still needed elsewhere.  */
4982(if (canonicalize_math_after_vectorization_p ())
4983 (for fmas (FMA)
4984 (simplify
4985 (fmas:c (negate @0) @1 @2)
4986 (IFN_FNMA @0 @1 @2))
4987 (simplify
4988 (fmas @0 @1 (negate @2))
4989 (IFN_FMS @0 @1 @2))
4990 (simplify
4991 (fmas:c (negate @0) @1 (negate @2))
4992 (IFN_FNMS @0 @1 @2))
4993 (simplify
4994 (negate (fmas@3 @0 @1 @2))
4995 (if (single_use (@3))
4996 (IFN_FNMS @0 @1 @2))))
4997
 /* Same set of folds for IFN_FMS: -(a*b) - c = FNMS, a*b + c = FMA, etc.  */
c566cc9f 4998 (simplify
c453ccc2
RS
4999 (IFN_FMS:c (negate @0) @1 @2)
5000 (IFN_FNMS @0 @1 @2))
5001 (simplify
5002 (IFN_FMS @0 @1 (negate @2))
5003 (IFN_FMA @0 @1 @2))
5004 (simplify
5005 (IFN_FMS:c (negate @0) @1 (negate @2))
c566cc9f
RS
5006 (IFN_FNMA @0 @1 @2))
5007 (simplify
c453ccc2
RS
5008 (negate (IFN_FMS@3 @0 @1 @2))
5009 (if (single_use (@3))
5010 (IFN_FNMA @0 @1 @2)))
5011
 /* Same set of folds for IFN_FNMA.  */
5012 (simplify
5013 (IFN_FNMA:c (negate @0) @1 @2)
5014 (IFN_FMA @0 @1 @2))
c566cc9f 5015 (simplify
c453ccc2 5016 (IFN_FNMA @0 @1 (negate @2))
c566cc9f
RS
5017 (IFN_FNMS @0 @1 @2))
5018 (simplify
c453ccc2
RS
5019 (IFN_FNMA:c (negate @0) @1 (negate @2))
5020 (IFN_FMS @0 @1 @2))
5021 (simplify
5022 (negate (IFN_FNMA@3 @0 @1 @2))
c566cc9f 5023 (if (single_use (@3))
c453ccc2 5024 (IFN_FMS @0 @1 @2)))
c566cc9f 5025
 /* Same set of folds for IFN_FNMS.  */
c453ccc2
RS
5026 (simplify
5027 (IFN_FNMS:c (negate @0) @1 @2)
5028 (IFN_FMS @0 @1 @2))
5029 (simplify
5030 (IFN_FNMS @0 @1 (negate @2))
5031 (IFN_FNMA @0 @1 @2))
5032 (simplify
5033 (IFN_FNMS:c (negate @0) @1 (negate @2))
5034 (IFN_FMA @0 @1 @2))
5035 (simplify
5036 (negate (IFN_FNMS@3 @0 @1 @2))
c566cc9f 5037 (if (single_use (@3))
c453ccc2 5038 (IFN_FMA @0 @1 @2))))
ba6557e2
RS
5039
5040/* POPCOUNT simplifications. */
5041(for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
5042 BUILT_IN_POPCOUNTIMAX)
5043 /* popcount(X&1) is nop_expr(X&1). */
 /* If only the low bit can possibly be set, the population count equals
    the value itself, so the call degenerates to a conversion.  */
5044 (simplify
5045 (popcount @0)
5046 (if (tree_nonzero_bits (@0) == 1)
5047 (convert @0)))
5048 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
5049 (simplify
5050 (plus (popcount:s @0) (popcount:s @1))
5051 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
5052 (popcount (bit_ior @0 @1))))
5053 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
 /* popcount is never negative, so <= 0 collapses to == 0 and > 0 to != 0,
    hence the le/eq/ne/gt -> eq/eq/ne/ne mapping below.  */
5054 (for cmp (le eq ne gt)
5055 rep (eq eq ne ne)
5056 (simplify
5057 (cmp (popcount @0) integer_zerop)
5058 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
0d2b3bca
RS
5059
5060/* Simplify:
5061
5062 a = a1 op a2
5063 r = c ? a : b;
5064
5065 to:
5066
5067 r = c ? a1 op a2 : b;
5068
5069 if the target can do it in one go. This makes the operation conditional
5070 on c, so could drop potentially-trapping arithmetic, but that's a valid
5071 simplification if the result of the operation isn't needed. */
5072(for uncond_op (UNCOND_BINARY)
5073 cond_op (COND_BINARY)
 /* Operation in the "then" arm: the vec_cond's else value @3 becomes the
    else operand of the conditional internal function.  The precision
    check ensures the view_convert between vector types is bit-exact.  */
5074 (simplify
5075 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
5076 (with { tree op_type = TREE_TYPE (@4); }
5077 (if (element_precision (type) == element_precision (op_type))
5078 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
 /* Operation in the "else" arm: invert the mask with bit_not so the
    vec_cond's then value @1 becomes the else operand instead.  */
5079 (simplify
5080 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
5081 (with { tree op_type = TREE_TYPE (@4); }
5082 (if (element_precision (type) == element_precision (op_type))
5083 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
6a86928d 5084
b41d1f6e
RS
5085/* Same for ternary operations. */
5086(for uncond_op (UNCOND_TERNARY)
5087 cond_op (COND_TERNARY)
 /* Operation in the "then" arm; @4 supplies the else value.  */
5088 (simplify
5089 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
5090 (with { tree op_type = TREE_TYPE (@5); }
5091 (if (element_precision (type) == element_precision (op_type))
5092 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
 /* Operation in the "else" arm; invert the mask so @1 is the else value.  */
5093 (simplify
5094 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
5095 (with { tree op_type = TREE_TYPE (@5); }
5096 (if (element_precision (type) == element_precision (op_type))
5097 (view_convert (cond_op (bit_not @0) @2 @3 @4
5098 (view_convert:op_type @1)))))))
5099
6a86928d
RS
5100/* Detect cases in which a VEC_COND_EXPR effectively replaces the
5101 "else" value of an IFN_COND_*. */
5102(for cond_op (COND_BINARY)
 /* Same mask @0 on both: where the mask is false the vec_cond picks @4
    anyway, so @4 supersedes the conditional op's else value @3.  */
5103 (simplify
5104 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
5105 (with { tree op_type = TREE_TYPE (@3); }
5106 (if (element_precision (type) == element_precision (op_type))
2c58d42c
RS
5107 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
 /* The op sits in the "else" arm under the inverse condition, so the
    vec_cond's then value @1 becomes the conditional op's else value.  */
5108 (simplify
5109 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
5110 (with { tree op_type = TREE_TYPE (@5); }
5111 (if (inverse_conditions_p (@0, @2)
5112 && element_precision (type) == element_precision (op_type))
5113 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
b41d1f6e
RS
5114
5115/* Same for ternary operations. */
5116(for cond_op (COND_TERNARY)
 /* Same mask on both: @5 replaces the op's else value @4.  */
5117 (simplify
5118 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
5119 (with { tree op_type = TREE_TYPE (@4); }
5120 (if (element_precision (type) == element_precision (op_type))
2c58d42c
RS
5121 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
 /* Inverse mask on the "else" arm: @1 becomes the op's else value.  */
5122 (simplify
5123 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
5124 (with { tree op_type = TREE_TYPE (@6); }
5125 (if (inverse_conditions_p (@0, @2)
5126 && element_precision (type) == element_precision (op_type))
5127 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
a19f98d5
RS
5128
5129/* For pointers @0 and @2 and nonnegative constant offset @1, look for
5130 expressions like:
5131
5132 A: (@0 + @1 < @2) | (@2 + @1 < @0)
5133 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
5134
5135 If pointers are known not to wrap, B checks whether @1 bytes starting
5136 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
5137 bytes. A is more efficiently tested as:
5138
5139 A: (sizetype) (@0 + @1 - @2) > @1 * 2
5140
5141 The equivalent expression for B is given by replacing @1 with @1 - 1:
5142
5143 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
5144
5145 @0 and @2 can be swapped in both expressions without changing the result.
5146
5147 The folds rely on sizetype's being unsigned (which is always true)
5148 and on its being the same width as the pointer (which we have to check).
5149
5150 The fold replaces two pointer_plus expressions, two comparisons and
5151 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
5152 the best case it's a saving of two operations. The A fold retains one
5153 of the original pointer_pluses, so is a win even if both pointer_pluses
5154 are used elsewhere. The B fold is a wash if both pointer_pluses are
5155 used elsewhere, since all we end up doing is replacing a comparison with
5156 a pointer_plus. We do still apply the fold under those circumstances
5157 though, in case applying it to other conditions eventually makes one of the
5158 pointer_pluses dead. */
5159(for ior (truth_orif truth_or bit_ior)
5160 (for cmp (le lt)
5161 (simplify
5162 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
5163 (cmp:cs (pointer_plus@4 @2 @1) @0))
5164 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5165 && TYPE_OVERFLOW_WRAPS (sizetype)
5166 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
5167 /* Calculate the rhs constant. */
 /* For the B (<=) form the effective offset is @1 - 1, per the comment
    above; for the A (<) form it is @1 itself.  */
5168 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
5169 offset_int rhs = off * 2; }
5170 /* Always fails for negative values. */
5171 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
5172 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
5173 pick a canonical order. This increases the chances of using the
5174 same pointer_plus in multiple checks. */
5175 (with { bool swap_p = tree_swap_operands_p (@0, @2);
5176 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
 /* A form: the needed pointer_plus (@3 or @4) already exists, so
    reuse it in the pointer_diff.  */
5177 (if (cmp == LT_EXPR)
5178 (gt (convert:sizetype
5179 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
5180 { swap_p ? @0 : @2; }))
5181 { rhs_tree; })
 /* B form: the offset is off (= @1 - 1), not @1, so build a fresh
    pointer_plus with the adjusted constant.  */
5182 (gt (convert:sizetype
5183 (pointer_diff:ssizetype
5184 (pointer_plus { swap_p ? @2 : @0; }
5185 { wide_int_to_tree (sizetype, off); })
5186 { swap_p ? @0 : @2; }))
5187 { rhs_tree; })))))))))