]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/match.pd
Only include bits/stl_algo.h for C++20.
[thirdparty/gcc.git] / gcc / match.pd
CommitLineData
3d2cf79f
RB
1/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
4
85ec4feb 5 Copyright (C) 2014-2018 Free Software Foundation, Inc.
3d2cf79f
RB
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
8
9This file is part of GCC.
10
11GCC is free software; you can redistribute it and/or modify it under
12the terms of the GNU General Public License as published by the Free
13Software Foundation; either version 3, or (at your option) any later
14version.
15
16GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17WARRANTY; without even the implied warranty of MERCHANTABILITY or
18FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19for more details.
20
21You should have received a copy of the GNU General Public License
22along with GCC; see the file COPYING3. If not see
23<http://www.gnu.org/licenses/>. */
24
25
26/* Generic tree predicates we inherit. */
27(define_predicates
cc7b5acf 28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
53a19317 29 integer_each_onep integer_truep integer_nonzerop
cc7b5acf 30 real_zerop real_onep real_minus_onep
b0eb889b 31 zerop
f3582e54 32 CONSTANT_CLASS_P
887ab609 33 tree_expr_nonnegative_p
e36c1cfe 34 tree_expr_nonzero_p
67dbe582 35 integer_valued_real_p
53a19317 36 integer_pow2p
f06e47d7 37 uniform_integer_cst_p
53a19317 38 HONOR_NANS)
e0ee10ed 39
f84e7fd6
RB
40/* Operator lists. */
41(define_operator_list tcc_comparison
42 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
43(define_operator_list inverted_tcc_comparison
44 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
45(define_operator_list inverted_tcc_comparison_with_nans
46 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
534bd33b
MG
47(define_operator_list swapped_tcc_comparison
48 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
07cdc2b8
RB
49(define_operator_list simple_comparison lt le eq ne ge gt)
50(define_operator_list swapped_simple_comparison gt ge eq ne le lt)
51
b1dc4a20 52#include "cfn-operators.pd"
257aecb4 53
543a9bcd
RS
54/* Define operand lists for math rounding functions {,i,l,ll}FN,
55 where the versions prefixed with "i" return an int, those prefixed with
56 "l" return a long and those prefixed with "ll" return a long long.
57
58 Also define operand lists:
59
60 X<FN>F for all float functions, in the order i, l, ll
61 X<FN> for all double functions, in the same order
62 X<FN>L for all long double functions, in the same order. */
63#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
543a9bcd
RS
64 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
65 BUILT_IN_L##FN##F \
66 BUILT_IN_LL##FN##F) \
67 (define_operator_list X##FN BUILT_IN_I##FN \
68 BUILT_IN_L##FN \
69 BUILT_IN_LL##FN) \
70 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
71 BUILT_IN_L##FN##L \
72 BUILT_IN_LL##FN##L)
73
543a9bcd
RS
74DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
75DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
76DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
77DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
0d2b3bca
RS
78
79/* Binary operations and their associated IFN_COND_* function. */
80(define_operator_list UNCOND_BINARY
81 plus minus
6c4fd4a9 82 mult trunc_div trunc_mod rdiv
0d2b3bca
RS
83 min max
84 bit_and bit_ior bit_xor)
85(define_operator_list COND_BINARY
86 IFN_COND_ADD IFN_COND_SUB
6c4fd4a9 87 IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
0d2b3bca
RS
88 IFN_COND_MIN IFN_COND_MAX
89 IFN_COND_AND IFN_COND_IOR IFN_COND_XOR)
b41d1f6e
RS
90
91/* Same for ternary operations. */
92(define_operator_list UNCOND_TERNARY
93 IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
94(define_operator_list COND_TERNARY
95 IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
03cc70b5 96
ed73f46f
MG
97/* As opposed to convert?, this still creates a single pattern, so
98 it is not a suitable replacement for convert? in all cases. */
99(match (nop_convert @0)
100 (convert @0)
101 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
102(match (nop_convert @0)
103 (view_convert @0)
104 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
928686b1
RS
105 && known_eq (TYPE_VECTOR_SUBPARTS (type),
106 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
ed73f46f
MG
107 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
108/* This one has to be last, or it shadows the others. */
109(match (nop_convert @0)
03cc70b5 110 @0)
f84e7fd6 111
e197e64e
KV
112/* Transform likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>
113 ABSU_EXPR returns unsigned absolute value of the operand and the operand
114 of the ABSU_EXPR will have the corresponding signed type. */
115(simplify (abs (convert @0))
116 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
117 && !TYPE_UNSIGNED (TREE_TYPE (@0))
118 && element_precision (type) > element_precision (TREE_TYPE (@0)))
119 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
120 (convert (absu:utype @0)))))
121
122
e0ee10ed 123/* Simplifications of operations with one constant operand and
36a60e48 124 simplifications to constants or single values. */
e0ee10ed
RB
125
126(for op (plus pointer_plus minus bit_ior bit_xor)
127 (simplify
128 (op @0 integer_zerop)
129 (non_lvalue @0)))
130
a499aac5
RB
131/* 0 +p index -> (type)index */
132(simplify
133 (pointer_plus integer_zerop @1)
134 (non_lvalue (convert @1)))
135
d43177ad
MG
136/* ptr - 0 -> (type)ptr */
137(simplify
138 (pointer_diff @0 integer_zerop)
139 (convert @0))
140
a7f24614
RB
141/* See if ARG1 is zero and X + ARG1 reduces to X.
142 Likewise if the operands are reversed. */
143(simplify
144 (plus:c @0 real_zerop@1)
145 (if (fold_real_zero_addition_p (type, @1, 0))
146 (non_lvalue @0)))
147
148/* See if ARG1 is zero and X - ARG1 reduces to X. */
149(simplify
150 (minus @0 real_zerop@1)
151 (if (fold_real_zero_addition_p (type, @1, 1))
152 (non_lvalue @0)))
153
e0ee10ed
RB
154/* Simplify x - x.
155 This is unsafe for certain floats even in non-IEEE formats.
156 In IEEE, it is unsafe because it does wrong for NaNs.
157 Also note that operand_equal_p is always false if an operand
158 is volatile. */
159(simplify
a7f24614 160 (minus @0 @0)
1b457aa4 161 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
a7f24614 162 { build_zero_cst (type); }))
1af4ebf5
MG
163(simplify
164 (pointer_diff @@0 @0)
165 { build_zero_cst (type); })
e0ee10ed
RB
166
167(simplify
a7f24614
RB
168 (mult @0 integer_zerop@1)
169 @1)
170
171/* Maybe fold x * 0 to 0. The expressions aren't the same
172 when x is NaN, since x * 0 is also NaN. Nor are they the
173 same in modes with signed zeros, since multiplying a
174 negative value by 0 gives -0, not +0. */
175(simplify
176 (mult @0 real_zerop@1)
8b5ee871 177 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
a7f24614
RB
178 @1))
179
180/* In IEEE floating point, x*1 is not equivalent to x for snans.
181 Likewise for complex arithmetic with signed zeros. */
182(simplify
183 (mult @0 real_onep)
8b5ee871
MG
184 (if (!HONOR_SNANS (type)
185 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
186 || !COMPLEX_FLOAT_TYPE_P (type)))
187 (non_lvalue @0)))
188
189/* Transform x * -1.0 into -x. */
190(simplify
191 (mult @0 real_minus_onep)
8b5ee871
MG
192 (if (!HONOR_SNANS (type)
193 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
194 || !COMPLEX_FLOAT_TYPE_P (type)))
195 (negate @0)))
e0ee10ed 196
8c2805bb
AP
197(for cmp (gt ge lt le)
198 outp (convert convert negate negate)
199 outn (negate negate convert convert)
200 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
201 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
202 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
203 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
204 (simplify
205 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
206 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
207 && types_match (type, TREE_TYPE (@0)))
208 (switch
209 (if (types_match (type, float_type_node))
210 (BUILT_IN_COPYSIGNF @1 (outp @0)))
211 (if (types_match (type, double_type_node))
212 (BUILT_IN_COPYSIGN @1 (outp @0)))
213 (if (types_match (type, long_double_type_node))
214 (BUILT_IN_COPYSIGNL @1 (outp @0))))))
215 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
216 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
217 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
218 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
219 (simplify
220 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
221 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
222 && types_match (type, TREE_TYPE (@0)))
223 (switch
224 (if (types_match (type, float_type_node))
225 (BUILT_IN_COPYSIGNF @1 (outn @0)))
226 (if (types_match (type, double_type_node))
227 (BUILT_IN_COPYSIGN @1 (outn @0)))
228 (if (types_match (type, long_double_type_node))
229 (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
230
231/* Transform X * copysign (1.0, X) into abs(X). */
232(simplify
c6cfa2bf 233 (mult:c @0 (COPYSIGN_ALL real_onep @0))
8c2805bb
AP
234 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
235 (abs @0)))
236
237/* Transform X * copysign (1.0, -X) into -abs(X). */
238(simplify
c6cfa2bf 239 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
8c2805bb
AP
240 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
241 (negate (abs @0))))
242
243/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
244(simplify
c6cfa2bf 245 (COPYSIGN_ALL REAL_CST@0 @1)
8c2805bb 246 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
c6cfa2bf 247 (COPYSIGN_ALL (negate @0) @1)))
8c2805bb 248
5b7f6ed0 249/* X * 1, X / 1 -> X. */
e0ee10ed
RB
250(for op (mult trunc_div ceil_div floor_div round_div exact_div)
251 (simplify
252 (op @0 integer_onep)
253 (non_lvalue @0)))
254
71f82be9
JG
255/* (A / (1 << B)) -> (A >> B).
256 Only for unsigned A. For signed A, this would not preserve rounding
257 toward zero.
258 For example: (-1 / ( 1 << B)) != -1 >> B. */
259(simplify
260 (trunc_div @0 (lshift integer_onep@1 @2))
261 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
262 && (!VECTOR_TYPE_P (type)
263 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
264 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
265 (rshift @0 @2)))
266
5b7f6ed0
MG
267/* Preserve explicit divisions by 0: the C++ front-end wants to detect
268 undefined behavior in constexpr evaluation, and assuming that the division
269 traps enables better optimizations than these anyway. */
a7f24614 270(for div (trunc_div ceil_div floor_div round_div exact_div)
5b7f6ed0
MG
271 /* 0 / X is always zero. */
272 (simplify
273 (div integer_zerop@0 @1)
274 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
275 (if (!integer_zerop (@1))
276 @0))
da186c1f 277 /* X / -1 is -X. */
a7f24614 278 (simplify
09240451
MG
279 (div @0 integer_minus_onep@1)
280 (if (!TYPE_UNSIGNED (type))
da186c1f 281 (negate @0)))
5b7f6ed0
MG
282 /* X / X is one. */
283 (simplify
284 (div @0 @0)
9ebce098
JJ
285 /* But not for 0 / 0 so that we can get the proper warnings and errors.
286 And not for _Fract types where we can't build 1. */
287 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
5b7f6ed0 288 { build_one_cst (type); }))
03cc70b5 289 /* X / abs (X) is X < 0 ? -1 : 1. */
da186c1f 290 (simplify
d96a5585
RB
291 (div:C @0 (abs @0))
292 (if (INTEGRAL_TYPE_P (type)
da186c1f
RB
293 && TYPE_OVERFLOW_UNDEFINED (type))
294 (cond (lt @0 { build_zero_cst (type); })
295 { build_minus_one_cst (type); } { build_one_cst (type); })))
296 /* X / -X is -1. */
297 (simplify
d96a5585 298 (div:C @0 (negate @0))
da186c1f
RB
299 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
300 && TYPE_OVERFLOW_UNDEFINED (type))
301 { build_minus_one_cst (type); })))
a7f24614
RB
302
303/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
304 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
305(simplify
306 (floor_div @0 @1)
09240451
MG
307 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
308 && TYPE_UNSIGNED (type))
a7f24614
RB
309 (trunc_div @0 @1)))
310
28093105
RB
311/* Combine two successive divisions. Note that combining ceil_div
312 and floor_div is trickier and combining round_div even more so. */
313(for div (trunc_div exact_div)
c306cfaf
RB
314 (simplify
315 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
316 (with {
4a669ac3 317 wi::overflow_type overflow;
8e6cdc90 318 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
4a669ac3 319 TYPE_SIGN (type), &overflow);
c306cfaf 320 }
4a669ac3 321 (if (!overflow)
8fdc6c67
RB
322 (div @0 { wide_int_to_tree (type, mul); })
323 (if (TYPE_UNSIGNED (type)
324 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
325 { build_zero_cst (type); })))))
c306cfaf 326
288fe52e
AM
327/* Combine successive multiplications. Similar to above, but handling
328 overflow is different. */
329(simplify
330 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
331 (with {
4a669ac3 332 wi::overflow_type overflow;
8e6cdc90 333 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
4a669ac3 334 TYPE_SIGN (type), &overflow);
288fe52e
AM
335 }
336 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
337 otherwise undefined overflow implies that @0 must be zero. */
4a669ac3 338 (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
288fe52e
AM
339 (mult @0 { wide_int_to_tree (type, mul); }))))
340
a7f24614 341/* Optimize A / A to 1.0 if we don't care about
09240451 342 NaNs or Infinities. */
a7f24614
RB
343(simplify
344 (rdiv @0 @0)
09240451 345 (if (FLOAT_TYPE_P (type)
1b457aa4 346 && ! HONOR_NANS (type)
8b5ee871 347 && ! HONOR_INFINITIES (type))
09240451
MG
348 { build_one_cst (type); }))
349
350/* Optimize -A / A to -1.0 if we don't care about
351 NaNs or Infinities. */
352(simplify
e04d2a35 353 (rdiv:C @0 (negate @0))
09240451 354 (if (FLOAT_TYPE_P (type)
1b457aa4 355 && ! HONOR_NANS (type)
8b5ee871 356 && ! HONOR_INFINITIES (type))
09240451 357 { build_minus_one_cst (type); }))
a7f24614 358
8c6961ca
PK
359/* PR71078: x / abs(x) -> copysign (1.0, x) */
360(simplify
361 (rdiv:C (convert? @0) (convert? (abs @0)))
362 (if (SCALAR_FLOAT_TYPE_P (type)
363 && ! HONOR_NANS (type)
364 && ! HONOR_INFINITIES (type))
365 (switch
366 (if (types_match (type, float_type_node))
367 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
368 (if (types_match (type, double_type_node))
369 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
370 (if (types_match (type, long_double_type_node))
371 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
372
a7f24614
RB
373/* In IEEE floating point, x/1 is not equivalent to x for snans. */
374(simplify
375 (rdiv @0 real_onep)
8b5ee871 376 (if (!HONOR_SNANS (type))
a7f24614
RB
377 (non_lvalue @0)))
378
379/* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
380(simplify
381 (rdiv @0 real_minus_onep)
8b5ee871 382 (if (!HONOR_SNANS (type))
a7f24614
RB
383 (negate @0)))
384
5711ac88 385(if (flag_reciprocal_math)
81825e28 386 /* Convert (A/B)/C to A/(B*C). */
5711ac88
N
387 (simplify
388 (rdiv (rdiv:s @0 @1) @2)
81825e28
WD
389 (rdiv @0 (mult @1 @2)))
390
391 /* Canonicalize x / (C1 * y) to (x * C2) / y. */
392 (simplify
393 (rdiv @0 (mult:s @1 REAL_CST@2))
394 (with
395 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
396 (if (tem)
397 (rdiv (mult @0 { tem; } ) @1))))
5711ac88
N
398
399 /* Convert A/(B/C) to (A/B)*C */
400 (simplify
401 (rdiv @0 (rdiv:s @1 @2))
402 (mult (rdiv @0 @1) @2)))
403
6a435314
WD
404/* Simplify x / (- y) to -x / y. */
405(simplify
406 (rdiv @0 (negate @1))
407 (rdiv (negate @0) @1))
408
5e21d765
WD
409(if (flag_unsafe_math_optimizations)
410 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
411 Since C / x may underflow to zero, do this only for unsafe math. */
412 (for op (lt le gt ge)
413 neg_op (gt ge lt le)
414 (simplify
415 (op (rdiv REAL_CST@0 @1) real_zerop@2)
416 (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
417 (switch
418 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
419 (op @1 @2))
420 /* For C < 0, use the inverted operator. */
421 (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
422 (neg_op @1 @2)))))))
423
5711ac88
N
424/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
425(for div (trunc_div ceil_div floor_div round_div exact_div)
426 (simplify
427 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
428 (if (integer_pow2p (@2)
429 && tree_int_cst_sgn (@2) > 0
a1488398 430 && tree_nop_conversion_p (type, TREE_TYPE (@0))
8e6cdc90
RS
431 && wi::to_wide (@2) + wi::to_wide (@1) == 0)
432 (rshift (convert @0)
433 { build_int_cst (integer_type_node,
434 wi::exact_log2 (wi::to_wide (@2))); }))))
5711ac88 435
a7f24614
RB
436/* If ARG1 is a constant, we can convert this to a multiply by the
437 reciprocal. This does not have the same rounding properties,
438 so only do this if -freciprocal-math. We can actually
439 always safely do it if ARG1 is a power of two, but it's hard to
440 tell if it is or not in a portable manner. */
441(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
442 (simplify
443 (rdiv @0 cst@1)
444 (if (optimize)
53bc4b3a
RB
445 (if (flag_reciprocal_math
446 && !real_zerop (@1))
a7f24614 447 (with
249700b5 448 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
a7f24614 449 (if (tem)
8fdc6c67
RB
450 (mult @0 { tem; } )))
451 (if (cst != COMPLEX_CST)
452 (with { tree inverse = exact_inverse (type, @1); }
453 (if (inverse)
454 (mult @0 { inverse; } ))))))))
a7f24614 455
a7f24614 456(for mod (ceil_mod floor_mod round_mod trunc_mod)
e0ee10ed
RB
457 /* 0 % X is always zero. */
458 (simplify
a7f24614 459 (mod integer_zerop@0 @1)
e0ee10ed
RB
460 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
461 (if (!integer_zerop (@1))
462 @0))
463 /* X % 1 is always zero. */
464 (simplify
a7f24614
RB
465 (mod @0 integer_onep)
466 { build_zero_cst (type); })
467 /* X % -1 is zero. */
468 (simplify
09240451
MG
469 (mod @0 integer_minus_onep@1)
470 (if (!TYPE_UNSIGNED (type))
bc4315fb 471 { build_zero_cst (type); }))
5b7f6ed0
MG
472 /* X % X is zero. */
473 (simplify
474 (mod @0 @0)
475 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
476 (if (!integer_zerop (@0))
477 { build_zero_cst (type); }))
bc4315fb
MG
478 /* (X % Y) % Y is just X % Y. */
479 (simplify
480 (mod (mod@2 @0 @1) @1)
98e30e51
RB
481 @2)
482 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
483 (simplify
484 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
485 (if (ANY_INTEGRAL_TYPE_P (type)
486 && TYPE_OVERFLOW_UNDEFINED (type)
8e6cdc90
RS
487 && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
488 TYPE_SIGN (type)))
392750c5
JJ
489 { build_zero_cst (type); }))
490 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
491 modulo and comparison, since it is simpler and equivalent. */
492 (for cmp (eq ne)
493 (simplify
494 (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
495 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
496 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
497 (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
a7f24614
RB
498
499/* X % -C is the same as X % C. */
500(simplify
501 (trunc_mod @0 INTEGER_CST@1)
502 (if (TYPE_SIGN (type) == SIGNED
503 && !TREE_OVERFLOW (@1)
8e6cdc90 504 && wi::neg_p (wi::to_wide (@1))
a7f24614
RB
505 && !TYPE_OVERFLOW_TRAPS (type)
506 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
507 && !sign_bit_p (@1, @1))
508 (trunc_mod @0 (negate @1))))
e0ee10ed 509
8f0c696a
RB
510/* X % -Y is the same as X % Y. */
511(simplify
512 (trunc_mod @0 (convert? (negate @1)))
a2a743a1
MP
513 (if (INTEGRAL_TYPE_P (type)
514 && !TYPE_UNSIGNED (type)
8f0c696a 515 && !TYPE_OVERFLOW_TRAPS (type)
20b8d734
JJ
516 && tree_nop_conversion_p (type, TREE_TYPE (@1))
517 /* Avoid this transformation if X might be INT_MIN or
518 Y might be -1, because we would then change valid
519 INT_MIN % -(-1) into invalid INT_MIN % -1. */
8e6cdc90 520 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
20b8d734
JJ
521 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
522 (TREE_TYPE (@1))))))
8f0c696a
RB
523 (trunc_mod @0 (convert @1))))
524
f461569a
MP
525/* X - (X / Y) * Y is the same as X % Y. */
526(simplify
2eef1fc1
RB
527 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
528 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
fba46f03 529 (convert (trunc_mod @0 @1))))
f461569a 530
8f0c696a
RB
531/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
532 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
533 Also optimize A % (C << N) where C is a power of 2,
534 to A & ((C << N) - 1). */
535(match (power_of_two_cand @1)
536 INTEGER_CST@1)
537(match (power_of_two_cand @1)
538 (lshift INTEGER_CST@1 @2))
539(for mod (trunc_mod floor_mod)
540 (simplify
4ab1e111 541 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
8f0c696a
RB
542 (if ((TYPE_UNSIGNED (type)
543 || tree_expr_nonnegative_p (@0))
4ab1e111 544 && tree_nop_conversion_p (type, TREE_TYPE (@3))
8f0c696a 545 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
4ab1e111 546 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
8f0c696a 547
887ab609
N
548/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
549(simplify
550 (trunc_div (mult @0 integer_pow2p@1) @1)
551 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
552 (bit_and @0 { wide_int_to_tree
8e6cdc90
RS
553 (type, wi::mask (TYPE_PRECISION (type)
554 - wi::exact_log2 (wi::to_wide (@1)),
887ab609
N
555 false, TYPE_PRECISION (type))); })))
556
5f8d832e
N
557/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
558(simplify
559 (mult (trunc_div @0 integer_pow2p@1) @1)
560 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
561 (bit_and @0 (negate @1))))
562
95765f36
N
563/* Simplify (t * 2) / 2) -> t. */
564(for div (trunc_div ceil_div floor_div round_div exact_div)
565 (simplify
55d84e61 566 (div (mult:c @0 @1) @1)
95765f36
N
567 (if (ANY_INTEGRAL_TYPE_P (type)
568 && TYPE_OVERFLOW_UNDEFINED (type))
569 @0)))
570
d202f9bd 571(for op (negate abs)
9b054b08
RS
572 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
573 (for coss (COS COSH)
574 (simplify
575 (coss (op @0))
576 (coss @0)))
577 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
578 (for pows (POW)
579 (simplify
580 (pows (op @0) REAL_CST@1)
581 (with { HOST_WIDE_INT n; }
582 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
5d3498b4 583 (pows @0 @1)))))
de3fbea3
RB
584 /* Likewise for powi. */
585 (for pows (POWI)
586 (simplify
587 (pows (op @0) INTEGER_CST@1)
8e6cdc90 588 (if ((wi::to_wide (@1) & 1) == 0)
de3fbea3 589 (pows @0 @1))))
5d3498b4
RS
590 /* Strip negate and abs from both operands of hypot. */
591 (for hypots (HYPOT)
592 (simplify
593 (hypots (op @0) @1)
594 (hypots @0 @1))
595 (simplify
596 (hypots @0 (op @1))
597 (hypots @0 @1)))
598 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
c6cfa2bf 599 (for copysigns (COPYSIGN_ALL)
5d3498b4
RS
600 (simplify
601 (copysigns (op @0) @1)
602 (copysigns @0 @1))))
603
604/* abs(x)*abs(x) -> x*x. Should be valid for all types. */
605(simplify
606 (mult (abs@1 @0) @1)
607 (mult @0 @0))
608
64f7ea7c
KV
609/* Convert absu(x)*absu(x) -> x*x. */
610(simplify
611 (mult (absu@1 @0) @1)
612 (mult (convert@2 @0) @2))
613
5d3498b4
RS
614/* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
615(for coss (COS COSH)
616 copysigns (COPYSIGN)
617 (simplify
618 (coss (copysigns @0 @1))
619 (coss @0)))
620
621/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
622(for pows (POW)
623 copysigns (COPYSIGN)
624 (simplify
de3fbea3 625 (pows (copysigns @0 @2) REAL_CST@1)
5d3498b4
RS
626 (with { HOST_WIDE_INT n; }
627 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
628 (pows @0 @1)))))
de3fbea3
RB
629/* Likewise for powi. */
630(for pows (POWI)
631 copysigns (COPYSIGN)
632 (simplify
633 (pows (copysigns @0 @2) INTEGER_CST@1)
8e6cdc90 634 (if ((wi::to_wide (@1) & 1) == 0)
de3fbea3 635 (pows @0 @1))))
5d3498b4
RS
636
637(for hypots (HYPOT)
638 copysigns (COPYSIGN)
639 /* hypot(copysign(x, y), z) -> hypot(x, z). */
640 (simplify
641 (hypots (copysigns @0 @1) @2)
642 (hypots @0 @2))
643 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
644 (simplify
645 (hypots @0 (copysigns @1 @2))
646 (hypots @0 @1)))
647
eeb57981 648/* copysign(x, CST) -> [-]abs (x). */
c6cfa2bf 649(for copysigns (COPYSIGN_ALL)
eeb57981
RB
650 (simplify
651 (copysigns @0 REAL_CST@1)
652 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
653 (negate (abs @0))
654 (abs @0))))
655
5d3498b4 656/* copysign(copysign(x, y), z) -> copysign(x, z). */
c6cfa2bf 657(for copysigns (COPYSIGN_ALL)
5d3498b4
RS
658 (simplify
659 (copysigns (copysigns @0 @1) @2)
660 (copysigns @0 @2)))
661
662/* copysign(x,y)*copysign(x,y) -> x*x. */
c6cfa2bf 663(for copysigns (COPYSIGN_ALL)
5d3498b4
RS
664 (simplify
665 (mult (copysigns@2 @0 @1) @2)
666 (mult @0 @0)))
667
668/* ccos(-x) -> ccos(x). Similarly for ccosh. */
669(for ccoss (CCOS CCOSH)
670 (simplify
671 (ccoss (negate @0))
672 (ccoss @0)))
d202f9bd 673
abcc43f5
RS
674/* cabs(-x) and cos(conj(x)) -> cabs(x). */
675(for ops (conj negate)
676 (for cabss (CABS)
677 (simplify
678 (cabss (ops @0))
679 (cabss @0))))
680
0a8f32b8
RB
681/* Fold (a * (1 << b)) into (a << b) */
682(simplify
683 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
684 (if (! FLOAT_TYPE_P (type)
9ff6fb6e 685 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
0a8f32b8
RB
686 (lshift @0 @2)))
687
4349b15f
SD
688/* Fold (1 << (C - x)) where C = precision(type) - 1
689 into ((1 << C) >> x). */
690(simplify
691 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
692 (if (INTEGRAL_TYPE_P (type)
56ccfbd6 693 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
4349b15f
SD
694 && single_use (@1))
695 (if (TYPE_UNSIGNED (type))
696 (rshift (lshift @0 @2) @3)
697 (with
698 { tree utype = unsigned_type_for (type); }
699 (convert (rshift (lshift (convert:utype @0) @2) @3))))))
700
0a8f32b8
RB
701/* Fold (C1/X)*C2 into (C1*C2)/X. */
702(simplify
ff86345f
RB
703 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
704 (if (flag_associative_math
705 && single_use (@3))
0a8f32b8
RB
706 (with
707 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
708 (if (tem)
709 (rdiv { tem; } @1)))))
710
711/* Simplify ~X & X as zero. */
712(simplify
713 (bit_and:c (convert? @0) (convert? (bit_not @0)))
714 { build_zero_cst (type); })
715
89b80c42
PK
716/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
717(simplify
718 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
719 (if (TYPE_UNSIGNED (type))
720 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
721
7aa13860
PK
722(for bitop (bit_and bit_ior)
723 cmp (eq ne)
a93952d2
JJ
724 /* PR35691: Transform
725 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
726 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
7aa13860
PK
727 (simplify
728 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
729 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
a93952d2
JJ
730 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
731 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
732 (cmp (bit_ior @0 (convert @1)) @2)))
733 /* Transform:
734 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
735 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
736 (simplify
737 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
738 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
739 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
740 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
741 (cmp (bit_and @0 (convert @1)) @2))))
7aa13860 742
10158317
RB
743/* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
744(simplify
a9658b11 745 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
10158317
RB
746 (minus (bit_xor @0 @1) @1))
747(simplify
748 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
8e6cdc90 749 (if (~wi::to_wide (@2) == wi::to_wide (@1))
10158317
RB
750 (minus (bit_xor @0 @1) @1)))
751
752/* Fold (A & B) - (A & ~B) into B - (A ^ B). */
753(simplify
a8e9f9a3 754 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
10158317
RB
755 (minus @1 (bit_xor @0 @1)))
756
42bd89ce
MG
757/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
758(for op (bit_ior bit_xor plus)
759 (simplify
760 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
761 (bit_xor @0 @1))
762 (simplify
763 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
8e6cdc90 764 (if (~wi::to_wide (@2) == wi::to_wide (@1))
42bd89ce 765 (bit_xor @0 @1))))
2066ef6a
PK
766
767/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
768(simplify
769 (bit_ior:c (bit_xor:c @0 @1) @0)
770 (bit_ior @0 @1))
771
e268a77b
MG
772/* (a & ~b) | (a ^ b) --> a ^ b */
773(simplify
774 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
775 @2)
776
777/* (a & ~b) ^ ~a --> ~(a & b) */
778(simplify
779 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
780 (bit_not (bit_and @0 @1)))
781
782/* (a | b) & ~(a ^ b) --> a & b */
783(simplify
784 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
785 (bit_and @0 @1))
786
787/* a | ~(a ^ b) --> a | ~b */
788(simplify
789 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
790 (bit_ior @0 (bit_not @1)))
791
792/* (a | b) | (a &^ b) --> a | b */
793(for op (bit_and bit_xor)
794 (simplify
795 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
796 @2))
797
798/* (a & b) | ~(a ^ b) --> ~(a ^ b) */
799(simplify
800 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
801 @2)
802
803/* ~(~a & b) --> a | ~b */
804(simplify
805 (bit_not (bit_and:cs (bit_not @0) @1))
806 (bit_ior @0 (bit_not @1)))
807
fd8303a5
MC
808/* ~(~a | b) --> a & ~b */
809(simplify
810 (bit_not (bit_ior:cs (bit_not @0) @1))
811 (bit_and @0 (bit_not @1)))
812
d982c5b7
MG
813/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
814#if GIMPLE
815(simplify
816 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
817 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8e6cdc90 818 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
d982c5b7
MG
819 (bit_xor @0 @1)))
820#endif
/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
   ((A & N) + B) & M -> (A + B) & M
   Similarly if (N & M) == 0,
   ((A | N) + B) & M -> (A + B) & M
   and for - instead of + (or unary - instead of +)
   and/or ^ instead of |.
   If B is constant and (B & M) == 0, fold into A & M.
   All the heavy lifting (validity checks and choosing the unsigned
   computation type) is done by fold_bit_and_mask; a NULL utype means
   the fold does not apply.  */
(for op (plus minus)
 (for bitop (bit_and bit_ior bit_xor)
  (simplify
   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
    (with
     { tree pmop[2];
       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
                                       @3, @4, @1, ERROR_MARK, NULL_TREE,
                                       NULL_TREE, pmop); }
     (if (utype)
      (convert (bit_and (op (convert:utype { pmop[0]; })
                            (convert:utype { pmop[1]; }))
                        (convert:utype @2))))))
  (simplify
   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
    (with
     { tree pmop[2];
       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                       NULL_TREE, NULL_TREE, @1, bitop, @3,
                                       @4, pmop); }
     (if (utype)
      (convert (bit_and (op (convert:utype { pmop[0]; })
                            (convert:utype { pmop[1]; }))
                        (convert:utype @2)))))))
 (simplify
  (bit_and (op:s @0 @1) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, @1, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2)))))))
/* Unary-negate variant of the above.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
                                      bitop, @2, @3, NULL_TREE, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (negate (convert:utype { pmop[0]; }))
                       (convert:utype @1)))))))
/* X % Y is smaller than Y (for unsigned X, Y), so the comparison folds
   to a constant.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))
/* x | ~0 -> ~0 */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0 */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
  (bit_xor @0 @0)
  { build_zero_cst (type); })
/* Canonicalize X ^ ~0 to ~X.  */
(simplify
  (bit_xor @0 integer_all_onesp@1)
  (bit_not @0))

/* x & ~0 -> x */
(simplify
 (bit_and @0 integer_all_onesp)
  (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
/* GIMPLE only: relies on get_nonzero_bits range information.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif
/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (~x & y) | ~(x | y) -> ~x */
(simplify
 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
 @2)

/* (x | y) ^ (x | ~y) -> ~x */
(simplify
 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
 (bit_not @0))

/* (x & y) | ~(x | y) -> ~(x ^ y) */
(simplify
 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x ^ y) -> x | ~y */
(simplify
 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
 (bit_ior @0 (bit_not @1)))

/* (x ^ y) | ~(x | y) -> ~(x & y) */
(simplify
 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_and @0 @1)))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))
/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))
/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* (~x | y) & (x | ~y) -> ~(x ^ y) */
(simplify
 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x | ~y) -> x ^ y */
(simplify
 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
 (bit_xor @0 @1))
/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y)
   (De Morgan; valid through conversions that do not widen.)  */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
   (bit_ior (convert @4) (convert @5)))))
/* (X | Y) ^ X -> Y & ~ X*/
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:cs @0 @1) @1)
  (bit_and (bit_not @0) @1)))
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z
   Prefer the form whose inner operation is otherwise dead (single use).  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))
/* Convert abs (abs (X)) into abs (X).
   also absu (absu (X)) into absu (X).  */
(simplify
 (abs (abs@1 @0))
 @1)

(simplify
 (absu (convert@2 (absu@1 @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
  @1))

/* Convert abs[u] (-X) -> abs[u] (X).  */
(simplify
 (abs (negate @0))
 (abs @0))

(simplify
 (absu (negate @0))
 (absu @0))

/* Convert abs[u] (X)  where X is nonnegative -> (X).
   For absu the result type differs from the operand type, hence the
   convert.  */
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

(simplify
 (absu tree_expr_nonnegative_p@0)
 (convert @0))
/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
 (minus @0 @1)
 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))))
/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* -(A - B) -> B - A.  */
(simplify
 (negate (minus @0 @1))
 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))
  (minus @1 @0)))
/* -(P - Q) -> Q - P for pointer differences when overflow is undefined.  */
(simplify
 (negate (pointer_diff @0 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (type))
  (pointer_diff @1 @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
 (plus @0 (negate @1))))
/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constants (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || !type_has_mode_precision_p (type)))
   (convert (bitop @0 (convert @1))))))
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
  /* (x | y) & x -> x */
  /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
  (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (if (!CONSTANT_CLASS_P (@0))
   /* This is the canonical form regardless of whether (bitop @1 @2) can be
      folded to a constant.  */
   (bitop @0 (bitop @1 @2))
   /* In this case we have three constants and (bitop @0 @1) doesn't fold
      to a constant.  This can happen if @0 or @1 is a POLY_INT_CST and if
      the values involved are such that the operation can't be decided at
      compile time.  Try folding one of @0 or @1 with @2 to see whether
      that combination can be decided at compile time.

      Keep the existing form if both folds fail, to avoid endless
      oscillation.  */
   (with { tree cst1 = const_binop (bitop, type, @0, @2); }
    (if (cst1)
     (bitop @1 { cst1; })
     (with { tree cst2 = const_binop (bitop, type, @1, @2); }
      (if (cst2)
       (bitop @0 { cst2; }))))))))
/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
  (truth_not @0))

/* The various ways a logical negation of @0 can be spelled.  */
(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, , if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true.  */
(for op (eq ne)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
/* ~~x -> x */
(simplify
  (bit_not (bit_not @0))
  @0)

/* Convert ~ (-A) to A - 1.  */
(simplify
 (bit_not (convert? (negate @0)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))

/* Convert - (~A) to A + 1.  */
(simplify
 (negate (nop_convert (bit_not @0)))
 (plus (view_convert @0) { build_each_one_cst (type); }))

/* Convert ~ (A - 1) or ~ (A + -1) to -A.  */
(simplify
 (bit_not (convert? (minus @0 integer_each_onep)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))
(simplify
 (bit_not (convert? (plus @0 integer_all_onesp)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))

/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify.  */
(simplify
 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 (bit_not @1)))))
(simplify
 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 @1))))

/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical.  */
(simplify
 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_not (bit_xor (view_convert @0) @1))))
/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))

/* Fold A - (A & B) into ~B & A.  */
(simplify
 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (convert (bit_and (bit_not @1) @0))))

/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0  */
(for cmp (gt lt ge le)
(simplify
 (mult (convert (cmp @0 @1)) @2)
  (cond (cmp @0 @1) @2 { build_zero_cst (type); })))
/* For integral types with undefined overflow and C != 0 fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && tree_expr_nonzero_p (@1))
   (cmp @0 @2))))

/* For integral types with wrapping overflow and C odd fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && (TREE_INT_CST_LOW (@1) & 1) != 0)
   (cmp @0 @2))))

/* For integral types with undefined overflow and C != 0 fold
   x * C RELOP y * C into:

   x RELOP y for nonnegative C
   y RELOP x for negative C  */
(for cmp (lt gt le ge)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
    (cmp @0 @2)
   (if (TREE_CODE (@1) == INTEGER_CST
        && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
    (cmp @2 @0))))))
/* (X - 1U) <= INT_MAX-1U into (int) X > 0.  */
(for cmp (le gt)
     icmp (gt le)
 (simplify
  (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
        && TYPE_UNSIGNED (TREE_TYPE (@0))
        && TYPE_PRECISION (TREE_TYPE (@0)) > 1
        && (wi::to_wide (@2)
            == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
    (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
     (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))

/* X / 4 < Y / 4 iff X < Y when the division is known to be exact.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
  (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
   (cmp @0 @1))))
/* X / C1 op C2 into a simple range test.
   fold_div_compare computes the [lo, hi] bounds; the switch below
   emits the cheapest equivalent comparison or range check.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && integer_nonzerop (@1)
       && !TREE_OVERFLOW (@1)
       && !TREE_OVERFLOW (@2))
   (with { tree lo, hi; bool neg_overflow;
           enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
                                                   &neg_overflow); }
    (switch
     (if (code == LT_EXPR || code == GE_EXPR)
       (if (TREE_OVERFLOW (lo))
        { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
        (if (code == LT_EXPR)
         (lt @0 { lo; })
         (ge @0 { lo; }))))
     (if (code == LE_EXPR || code == GT_EXPR)
       (if (TREE_OVERFLOW (hi))
        { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
        (if (code == LE_EXPR)
         (le @0 { hi; })
         (gt @0 { hi; }))))
     (if (!lo && !hi)
      { build_int_cst (type, code == NE_EXPR); })
     (if (code == EQ_EXPR && !hi)
      (ge @0 { lo; }))
     (if (code == EQ_EXPR && !lo)
      (le @0 { hi; }))
     (if (code == NE_EXPR && !hi)
      (lt @0 { lo; }))
     (if (code == NE_EXPR && !lo)
      (gt @0 { hi; }))
     (if (GENERIC)
      { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
                           lo, hi); })
     (with
      {
        tree etype = range_check_type (TREE_TYPE (@0));
        if (etype)
          {
            if (! TYPE_UNSIGNED (etype))
              etype = unsigned_type_for (etype);
            hi = fold_convert (etype, hi);
            lo = fold_convert (etype, lo);
            hi = const_binop (MINUS_EXPR, etype, hi, lo);
          }
      }
      (if (etype && hi && !TREE_OVERFLOW (hi))
       (if (code == EQ_EXPR)
        (le (minus (convert:etype @0) { lo; }) { hi; })
        (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
/* X + Z < Y + Z is the same as X < Y when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (plus:c @0 @2) (plus:c @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (plus:c @0 @2) (plus:c @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @0 @1))))

/* X - Z < Y - Z is the same as X < Y when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (minus @0 @2) (minus @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (minus @0 @2) (minus @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @0 @1))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @0 @1))))
(simplify
 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @0 @1)))

/* Z - X < Z - Y is the same as Y < X when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @1 @0))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @1 @0))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @1 @0))))
(simplify
 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @1 @0)))
/* X + Y < Y is the same as X < 0 when there is no overflow.  */
(for op (lt le gt ge)
 (simplify
  (op:c (plus:c@2 @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@0) || single_use (@2)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
/* For equality, this is also true with wrapping overflow.  */
(for op (eq ne)
 (simplify
  (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
       && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
 (simplify
  (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
  (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))

/* X - Y < X is the same as Y > 0 when there is no overflow.
   For equality, this is also true with wrapping overflow.  */
(for op (simple_comparison)
 (simplify
  (op:c @0 (minus@2 @0 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || ((op == EQ_EXPR || op == NE_EXPR)
               && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
       && (CONSTANT_CLASS_P (@1) || single_use (@2)))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
/* Transform:
   (X / Y) == 0 -> X < Y if X, Y are unsigned.
   (X / Y) != 0 -> X >= Y, if X, Y are unsigned.  */
(for cmp (eq ne)
     ocmp (lt ge)
 (simplify
  (cmp (trunc_div @0 @1) integer_zerop)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       /* Complex ==/!= is allowed, but not </>=.  */
       && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
       && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
   (ocmp @0 @1))))

/* X == C - X can never be true if C is odd.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
  (if (TREE_INT_CST_LOW (@1) & 1)
   { constant_boolean_node (cmp == NE_EXPR, type); })))
/* Arguments on which one can call get_nonzero_bits to get the bits
   possibly set.  */
(match with_possible_nonzero_bits
 INTEGER_CST@0)
(match with_possible_nonzero_bits
 SSA_NAME@0
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
/* Slightly extended version, do not make it recursive to keep it cheap.  */
(match (with_possible_nonzero_bits2 @0)
 with_possible_nonzero_bits@0)
(match (with_possible_nonzero_bits2 @0)
 (bit_and:c with_possible_nonzero_bits@0 @2))

/* Same for bits that are known to be set, but we do not have
   an equivalent to get_nonzero_bits yet.  */
(match (with_certain_nonzero_bits2 @0)
 INTEGER_CST@0)
(match (with_certain_nonzero_bits2 @0)
 (bit_ior @1 INTEGER_CST@0))

/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
  (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
   { constant_boolean_node (cmp == NE_EXPR, type); })))
/* ((X inner_op C0) outer_op C1)
   With X being a tree where value_range has reasoned certain bits to always be
   zero throughout its computed value range,
   inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
   where zero_mask has 1's for all bits that are sure to be 0 in
   and 0's otherwise.
   if (inner_op == '^') C0 &= ~C1;
   if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)
   if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)
*/
(for inner_op (bit_ior bit_xor)
     outer_op (bit_xor bit_ior)
(simplify
 (outer_op
  (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
 (with
  {
    bool fail = false;
    wide_int zero_mask_not;
    wide_int C0;
    wide_int cst_emit;

    /* Known-zero bits are only available for SSA names.  */
    if (TREE_CODE (@2) == SSA_NAME)
      zero_mask_not = get_nonzero_bits (@2);
    else
      fail = true;

    if (inner_op == BIT_XOR_EXPR)
      {
        C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
        cst_emit = C0 | wi::to_wide (@1);
      }
    else
      {
        C0 = wi::to_wide (@0);
        cst_emit = C0 ^ wi::to_wide (@1);
      }
  }
  (if (!fail && (C0 & zero_mask_not) == 0)
   (outer_op @2 { wide_int_to_tree (type, cst_emit); })
   (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
    (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
(simplify
  (pointer_plus (pointer_plus:s @0 @1) @3)
  (pointer_plus @0 (plus @1 @3)))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
  (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
  /* Conditionally look through a sign-changing conversion.  */
  (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
       && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
            || (GENERIC && type == TREE_TYPE (@1))))
   @1))
/* Same, with the difference expressed as POINTER_DIFF_EXPR.  */
(simplify
  (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
  (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
   (convert @1)))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler and easier to analyze with respect to alignment
     ... = ptr & ~algn;  */
(simplify
  (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
  (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
   (bit_and @0 { algn; })))
1750
99e943a2
RB
1751/* Try folding difference of addresses. */
1752(simplify
1753 (minus (convert ADDR_EXPR@0) (convert @1))
1754 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
f37fac2b 1755 (with { poly_int64 diff; }
99e943a2
RB
1756 (if (ptr_difference_const (@0, @1, &diff))
1757 { build_int_cst_type (type, diff); }))))
1758(simplify
1759 (minus (convert @0) (convert ADDR_EXPR@1))
1760 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
f37fac2b 1761 (with { poly_int64 diff; }
99e943a2
RB
1762 (if (ptr_difference_const (@0, @1, &diff))
1763 { build_int_cst_type (type, diff); }))))
1af4ebf5 1764(simplify
67fccea4 1765 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1af4ebf5
MG
1766 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1767 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
f37fac2b 1768 (with { poly_int64 diff; }
1af4ebf5
MG
1769 (if (ptr_difference_const (@0, @1, &diff))
1770 { build_int_cst_type (type, diff); }))))
1771(simplify
67fccea4 1772 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1af4ebf5
MG
1773 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1774 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
f37fac2b 1775 (with { poly_int64 diff; }
1af4ebf5
MG
1776 (if (ptr_difference_const (@0, @1, &diff))
1777 { build_int_cst_type (type, diff); }))))
99e943a2 1778
bab73f11
RB
1779/* If arg0 is derived from the address of an object or function, we may
1780 be able to fold this expression using the object or function's
1781 alignment. */
1782(simplify
1783 (bit_and (convert? @0) INTEGER_CST@1)
1784 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1785 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1786 (with
1787 {
1788 unsigned int align;
1789 unsigned HOST_WIDE_INT bitpos;
1790 get_pointer_alignment_1 (@0, &align, &bitpos);
1791 }
8e6cdc90
RS
1792 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1793 { wide_int_to_tree (type, (wi::to_wide (@1)
1794 & (bitpos / BITS_PER_UNIT))); }))))
99e943a2 1795
a499aac5 1796
cc7b5acf
RB
1797/* We can't reassociate at all for saturating types. */
1798(if (!TYPE_SATURATING (type))
1799
1800 /* Contract negates. */
1801 /* A + (-B) -> A - B */
1802 (simplify
248179b5
RB
1803 (plus:c @0 (convert? (negate @1)))
1804 /* Apply STRIP_NOPS on the negate. */
1805 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1806 && !TYPE_OVERFLOW_SANITIZED (type))
248179b5
RB
1807 (with
1808 {
1809 tree t1 = type;
1810 if (INTEGRAL_TYPE_P (type)
1811 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1812 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1813 }
1814 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
cc7b5acf
RB
1815 /* A - (-B) -> A + B */
1816 (simplify
248179b5
RB
1817 (minus @0 (convert? (negate @1)))
1818 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1819 && !TYPE_OVERFLOW_SANITIZED (type))
248179b5
RB
1820 (with
1821 {
1822 tree t1 = type;
1823 if (INTEGRAL_TYPE_P (type)
1824 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1825 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1826 }
1827 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
63626547
MG
1828 /* -(T)(-A) -> (T)A
1829 Sign-extension is ok except for INT_MIN, which thankfully cannot
1830 happen without overflow. */
1831 (simplify
1832 (negate (convert (negate @1)))
1833 (if (INTEGRAL_TYPE_P (type)
1834 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
1835 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
1836 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1837 && !TYPE_OVERFLOW_SANITIZED (type)
1838 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
a0f12cf8 1839 (convert @1)))
63626547
MG
1840 (simplify
1841 (negate (convert negate_expr_p@1))
1842 (if (SCALAR_FLOAT_TYPE_P (type)
1843 && ((DECIMAL_FLOAT_TYPE_P (type)
1844 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
1845 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
1846 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
1847 (convert (negate @1))))
1848 (simplify
1849 (negate (nop_convert (negate @1)))
1850 (if (!TYPE_OVERFLOW_SANITIZED (type)
1851 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1852 (view_convert @1)))
cc7b5acf 1853
7318e44f
RB
1854 /* We can't reassociate floating-point unless -fassociative-math
1855 or fixed-point plus or minus because of saturation to +-Inf. */
1856 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1857 && !FIXED_POINT_TYPE_P (type))
cc7b5acf
RB
1858
1859 /* Match patterns that allow contracting a plus-minus pair
1860 irrespective of overflow issues. */
1861 /* (A +- B) - A -> +- B */
1862 /* (A +- B) -+ B -> A */
1863 /* A - (A +- B) -> -+ B */
1864 /* A +- (B -+ A) -> +- B */
1865 (simplify
1866 (minus (plus:c @0 @1) @0)
1867 @1)
1868 (simplify
1869 (minus (minus @0 @1) @0)
1870 (negate @1))
1871 (simplify
1872 (plus:c (minus @0 @1) @1)
1873 @0)
1874 (simplify
1875 (minus @0 (plus:c @0 @1))
1876 (negate @1))
1877 (simplify
1878 (minus @0 (minus @0 @1))
1879 @1)
1e7df2e6
MG
1880 /* (A +- B) + (C - A) -> C +- B */
1881 /* (A + B) - (A - C) -> B + C */
1882 /* More cases are handled with comparisons. */
1883 (simplify
1884 (plus:c (plus:c @0 @1) (minus @2 @0))
1885 (plus @2 @1))
1886 (simplify
1887 (plus:c (minus @0 @1) (minus @2 @0))
1888 (minus @2 @1))
1af4ebf5
MG
1889 (simplify
1890 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
1891 (if (TYPE_OVERFLOW_UNDEFINED (type)
1892 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
1893 (pointer_diff @2 @1)))
1e7df2e6
MG
1894 (simplify
1895 (minus (plus:c @0 @1) (minus @0 @2))
1896 (plus @1 @2))
cc7b5acf 1897
ed73f46f
MG
1898 /* (A +- CST1) +- CST2 -> A + CST3
1899 Use view_convert because it is safe for vectors and equivalent for
1900 scalars. */
cc7b5acf
RB
1901 (for outer_op (plus minus)
1902 (for inner_op (plus minus)
ed73f46f 1903 neg_inner_op (minus plus)
cc7b5acf 1904 (simplify
ed73f46f
MG
1905 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1906 CONSTANT_CLASS_P@2)
1907 /* If one of the types wraps, use that one. */
1908 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
3eb1eecf
JJ
1909 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
1910 forever if something doesn't simplify into a constant. */
1911 (if (!CONSTANT_CLASS_P (@0))
1912 (if (outer_op == PLUS_EXPR)
1913 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1914 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
ed73f46f
MG
1915 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1916 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1917 (if (outer_op == PLUS_EXPR)
1918 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1919 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1920 /* If the constant operation overflows we cannot do the transform
1921 directly as we would introduce undefined overflow, for example
1922 with (a - 1) + INT_MIN. */
1923 (if (types_match (type, @0))
1924 (with { tree cst = const_binop (outer_op == inner_op
1925 ? PLUS_EXPR : MINUS_EXPR,
1926 type, @1, @2); }
1927 (if (cst && !TREE_OVERFLOW (cst))
1928 (inner_op @0 { cst; } )
1929 /* X+INT_MAX+1 is X-INT_MIN. */
1930 (if (INTEGRAL_TYPE_P (type) && cst
8e6cdc90
RS
1931 && wi::to_wide (cst) == wi::min_value (type))
1932 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
ed73f46f
MG
1933 /* Last resort, use some unsigned type. */
1934 (with { tree utype = unsigned_type_for (type); }
48fcd201
JJ
1935 (if (utype)
1936 (view_convert (inner_op
1937 (view_convert:utype @0)
1938 (view_convert:utype
1939 { drop_tree_overflow (cst); }))))))))))))))
cc7b5acf 1940
b302f2e0 1941 /* (CST1 - A) +- CST2 -> CST3 - A */
cc7b5acf
RB
1942 (for outer_op (plus minus)
1943 (simplify
1944 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
23f27839 1945 (with { tree cst = const_binop (outer_op, type, @1, @2); }
cc7b5acf
RB
1946 (if (cst && !TREE_OVERFLOW (cst))
1947 (minus { cst; } @0)))))
1948
b302f2e0
RB
1949 /* CST1 - (CST2 - A) -> CST3 + A */
1950 (simplify
1951 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1952 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1953 (if (cst && !TREE_OVERFLOW (cst))
1954 (plus { cst; } @0))))
1955
cc7b5acf
RB
1956 /* ~A + A -> -1 */
1957 (simplify
1958 (plus:c (bit_not @0) @0)
1959 (if (!TYPE_OVERFLOW_TRAPS (type))
1960 { build_all_ones_cst (type); }))
1961
1962 /* ~A + 1 -> -A */
1963 (simplify
e19740ae
RB
1964 (plus (convert? (bit_not @0)) integer_each_onep)
1965 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1966 (negate (convert @0))))
1967
1968 /* -A - 1 -> ~A */
1969 (simplify
1970 (minus (convert? (negate @0)) integer_each_onep)
1971 (if (!TYPE_OVERFLOW_TRAPS (type)
1972 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1973 (bit_not (convert @0))))
1974
1975 /* -1 - A -> ~A */
1976 (simplify
1977 (minus integer_all_onesp @0)
bc4315fb 1978 (bit_not @0))
cc7b5acf
RB
1979
1980 /* (T)(P + A) - (T)P -> (T) A */
d7f44d4d 1981 (simplify
a72610d4
JJ
1982 (minus (convert (plus:c @@0 @1))
1983 (convert? @0))
d7f44d4d
JJ
1984 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1985 /* For integer types, if A has a smaller type
1986 than T the result depends on the possible
1987 overflow in P + A.
1988 E.g. T=size_t, A=(unsigned)429497295, P>0.
1989 However, if an overflow in P + A would cause
1990 undefined behavior, we can assume that there
1991 is no overflow. */
a72610d4
JJ
1992 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1993 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
d7f44d4d
JJ
1994 (convert @1)))
1995 (simplify
1996 (minus (convert (pointer_plus @@0 @1))
1997 (convert @0))
1998 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1999 /* For pointer types, if the conversion of A to the
2000 final type requires a sign- or zero-extension,
2001 then we have to punt - it is not defined which
2002 one is correct. */
2003 || (POINTER_TYPE_P (TREE_TYPE (@0))
2004 && TREE_CODE (@1) == INTEGER_CST
2005 && tree_int_cst_sign_bit (@1) == 0))
2006 (convert @1)))
1af4ebf5
MG
2007 (simplify
2008 (pointer_diff (pointer_plus @@0 @1) @0)
2009 /* The second argument of pointer_plus must be interpreted as signed, and
2010 thus sign-extended if necessary. */
2011 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
8ae43881
JJ
2012 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2013 second arg is unsigned even when we need to consider it as signed,
2014 we don't want to diagnose overflow here. */
2015 (convert (view_convert:stype @1))))
a8fc2579
RB
2016
2017 /* (T)P - (T)(P + A) -> -(T) A */
d7f44d4d 2018 (simplify
a72610d4
JJ
2019 (minus (convert? @0)
2020 (convert (plus:c @@0 @1)))
d7f44d4d
JJ
2021 (if (INTEGRAL_TYPE_P (type)
2022 && TYPE_OVERFLOW_UNDEFINED (type)
2023 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2024 (with { tree utype = unsigned_type_for (type); }
2025 (convert (negate (convert:utype @1))))
a8fc2579
RB
2026 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2027 /* For integer types, if A has a smaller type
2028 than T the result depends on the possible
2029 overflow in P + A.
2030 E.g. T=size_t, A=(unsigned)429497295, P>0.
2031 However, if an overflow in P + A would cause
2032 undefined behavior, we can assume that there
2033 is no overflow. */
a72610d4
JJ
2034 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2035 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
d7f44d4d
JJ
2036 (negate (convert @1)))))
2037 (simplify
2038 (minus (convert @0)
2039 (convert (pointer_plus @@0 @1)))
2040 (if (INTEGRAL_TYPE_P (type)
2041 && TYPE_OVERFLOW_UNDEFINED (type)
2042 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2043 (with { tree utype = unsigned_type_for (type); }
2044 (convert (negate (convert:utype @1))))
2045 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
a8fc2579
RB
2046 /* For pointer types, if the conversion of A to the
2047 final type requires a sign- or zero-extension,
2048 then we have to punt - it is not defined which
2049 one is correct. */
2050 || (POINTER_TYPE_P (TREE_TYPE (@0))
2051 && TREE_CODE (@1) == INTEGER_CST
2052 && tree_int_cst_sign_bit (@1) == 0))
2053 (negate (convert @1)))))
1af4ebf5
MG
2054 (simplify
2055 (pointer_diff @0 (pointer_plus @@0 @1))
2056 /* The second argument of pointer_plus must be interpreted as signed, and
2057 thus sign-extended if necessary. */
2058 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
8ae43881
JJ
2059 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2060 second arg is unsigned even when we need to consider it as signed,
2061 we don't want to diagnose overflow here. */
2062 (negate (convert (view_convert:stype @1)))))
a8fc2579
RB
2063
2064 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
d7f44d4d 2065 (simplify
a72610d4 2066 (minus (convert (plus:c @@0 @1))
d7f44d4d
JJ
2067 (convert (plus:c @0 @2)))
2068 (if (INTEGRAL_TYPE_P (type)
2069 && TYPE_OVERFLOW_UNDEFINED (type)
a72610d4
JJ
2070 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2071 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
d7f44d4d
JJ
2072 (with { tree utype = unsigned_type_for (type); }
2073 (convert (minus (convert:utype @1) (convert:utype @2))))
a72610d4
JJ
2074 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2075 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2076 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2077 /* For integer types, if A has a smaller type
2078 than T the result depends on the possible
2079 overflow in P + A.
2080 E.g. T=size_t, A=(unsigned)429497295, P>0.
2081 However, if an overflow in P + A would cause
2082 undefined behavior, we can assume that there
2083 is no overflow. */
2084 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2085 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2086 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2087 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
d7f44d4d
JJ
2088 (minus (convert @1) (convert @2)))))
2089 (simplify
2090 (minus (convert (pointer_plus @@0 @1))
2091 (convert (pointer_plus @0 @2)))
2092 (if (INTEGRAL_TYPE_P (type)
2093 && TYPE_OVERFLOW_UNDEFINED (type)
2094 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2095 (with { tree utype = unsigned_type_for (type); }
2096 (convert (minus (convert:utype @1) (convert:utype @2))))
2097 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
a8fc2579
RB
2098 /* For pointer types, if the conversion of A to the
2099 final type requires a sign- or zero-extension,
2100 then we have to punt - it is not defined which
2101 one is correct. */
2102 || (POINTER_TYPE_P (TREE_TYPE (@0))
2103 && TREE_CODE (@1) == INTEGER_CST
2104 && tree_int_cst_sign_bit (@1) == 0
2105 && TREE_CODE (@2) == INTEGER_CST
2106 && tree_int_cst_sign_bit (@2) == 0))
d7f44d4d 2107 (minus (convert @1) (convert @2)))))
1af4ebf5
MG
2108 (simplify
2109 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2110 /* The second argument of pointer_plus must be interpreted as signed, and
2111 thus sign-extended if necessary. */
2112 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
8ae43881
JJ
2113 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2114 second arg is unsigned even when we need to consider it as signed,
2115 we don't want to diagnose overflow here. */
2116 (minus (convert (view_convert:stype @1))
2117 (convert (view_convert:stype @2)))))))
cc7b5acf 2118
5b55e6e3
RB
2119/* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2120 Modeled after fold_plusminus_mult_expr. */
2121(if (!TYPE_SATURATING (type)
2122 && (!FLOAT_TYPE_P (type) || flag_associative_math))
2123 (for plusminus (plus minus)
2124 (simplify
c1bbe5b3
RB
2125 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2126 (if ((!ANY_INTEGRAL_TYPE_P (type)
5b55e6e3
RB
2127 || TYPE_OVERFLOW_WRAPS (type)
2128 || (INTEGRAL_TYPE_P (type)
2129 && tree_expr_nonzero_p (@0)
2130 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
c1bbe5b3
RB
2131 /* If @1 +- @2 is constant require a hard single-use on either
2132 original operand (but not on both). */
2133 && (single_use (@3) || single_use (@4)))
2134 (mult (plusminus @1 @2) @0)))
2135 /* We cannot generate constant 1 for fract. */
2136 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2137 (simplify
2138 (plusminus @0 (mult:c@3 @0 @2))
2139 (if ((!ANY_INTEGRAL_TYPE_P (type)
2140 || TYPE_OVERFLOW_WRAPS (type)
2141 || (INTEGRAL_TYPE_P (type)
2142 && tree_expr_nonzero_p (@0)
2143 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2144 && single_use (@3))
5b55e6e3
RB
2145 (mult (plusminus { build_one_cst (type); } @2) @0)))
2146 (simplify
c1bbe5b3
RB
2147 (plusminus (mult:c@3 @0 @2) @0)
2148 (if ((!ANY_INTEGRAL_TYPE_P (type)
2149 || TYPE_OVERFLOW_WRAPS (type)
2150 || (INTEGRAL_TYPE_P (type)
2151 && tree_expr_nonzero_p (@0)
2152 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2153 && single_use (@3))
5b55e6e3 2154 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
cc7b5acf 2155
0122e8e5 2156/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
a7f24614 2157
c6cfa2bf 2158(for minmax (min max FMIN_ALL FMAX_ALL)
a7f24614
RB
2159 (simplify
2160 (minmax @0 @0)
2161 @0))
4a334cba
RS
2162/* min(max(x,y),y) -> y. */
2163(simplify
2164 (min:c (max:c @0 @1) @1)
2165 @1)
2166/* max(min(x,y),y) -> y. */
2167(simplify
2168 (max:c (min:c @0 @1) @1)
2169 @1)
d657e995
RB
2170/* max(a,-a) -> abs(a). */
2171(simplify
2172 (max:c @0 (negate @0))
2173 (if (TREE_CODE (type) != COMPLEX_TYPE
2174 && (! ANY_INTEGRAL_TYPE_P (type)
2175 || TYPE_OVERFLOW_UNDEFINED (type)))
2176 (abs @0)))
54f84ca9
RB
2177/* min(a,-a) -> -abs(a). */
2178(simplify
2179 (min:c @0 (negate @0))
2180 (if (TREE_CODE (type) != COMPLEX_TYPE
2181 && (! ANY_INTEGRAL_TYPE_P (type)
2182 || TYPE_OVERFLOW_UNDEFINED (type)))
2183 (negate (abs @0))))
a7f24614
RB
2184(simplify
2185 (min @0 @1)
2c2870a1
MG
2186 (switch
2187 (if (INTEGRAL_TYPE_P (type)
2188 && TYPE_MIN_VALUE (type)
2189 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2190 @1)
2191 (if (INTEGRAL_TYPE_P (type)
2192 && TYPE_MAX_VALUE (type)
2193 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2194 @0)))
a7f24614
RB
2195(simplify
2196 (max @0 @1)
2c2870a1
MG
2197 (switch
2198 (if (INTEGRAL_TYPE_P (type)
2199 && TYPE_MAX_VALUE (type)
2200 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2201 @1)
2202 (if (INTEGRAL_TYPE_P (type)
2203 && TYPE_MIN_VALUE (type)
2204 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2205 @0)))
ad6e4ba8 2206
182f37c9
N
2207/* max (a, a + CST) -> a + CST where CST is positive. */
2208/* max (a, a + CST) -> a where CST is negative. */
2209(simplify
2210 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2211 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2212 (if (tree_int_cst_sgn (@1) > 0)
2213 @2
2214 @0)))
2215
2216/* min (a, a + CST) -> a where CST is positive. */
2217/* min (a, a + CST) -> a + CST where CST is negative. */
2218(simplify
2219 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2220 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2221 (if (tree_int_cst_sgn (@1) > 0)
2222 @0
2223 @2)))
2224
ad6e4ba8
BC
2225/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
2226 and the outer convert demotes the expression back to x's type. */
2227(for minmax (min max)
2228 (simplify
2229 (convert (minmax@0 (convert @1) INTEGER_CST@2))
ebf41734
BC
2230 (if (INTEGRAL_TYPE_P (type)
2231 && types_match (@1, type) && int_fits_type_p (@2, type)
ad6e4ba8
BC
2232 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2233 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2234 (minmax @1 (convert @2)))))
2235
c6cfa2bf 2236(for minmax (FMIN_ALL FMAX_ALL)
0122e8e5
RS
2237 /* If either argument is NaN, return the other one. Avoid the
2238 transformation if we get (and honor) a signalling NaN. */
2239 (simplify
2240 (minmax:c @0 REAL_CST@1)
2241 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2242 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2243 @0)))
2244/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2245 functions to return the numeric arg if the other one is NaN.
2246 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2247 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2248 worry about it either. */
2249(if (flag_finite_math_only)
2250 (simplify
c6cfa2bf 2251 (FMIN_ALL @0 @1)
0122e8e5 2252 (min @0 @1))
4119b2eb 2253 (simplify
c6cfa2bf 2254 (FMAX_ALL @0 @1)
0122e8e5 2255 (max @0 @1)))
ce0e66ff 2256/* min (-A, -B) -> -max (A, B) */
c6cfa2bf
MM
2257(for minmax (min max FMIN_ALL FMAX_ALL)
2258 maxmin (max min FMAX_ALL FMIN_ALL)
ce0e66ff
MG
2259 (simplify
2260 (minmax (negate:s@2 @0) (negate:s@3 @1))
2261 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2262 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2263 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2264 (negate (maxmin @0 @1)))))
2265/* MIN (~X, ~Y) -> ~MAX (X, Y)
2266 MAX (~X, ~Y) -> ~MIN (X, Y) */
2267(for minmax (min max)
2268 maxmin (max min)
2269 (simplify
2270 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2271 (bit_not (maxmin @0 @1))))
a7f24614 2272
b4817bd6
MG
2273/* MIN (X, Y) == X -> X <= Y */
2274(for minmax (min min max max)
2275 cmp (eq ne eq ne )
2276 out (le gt ge lt )
2277 (simplify
2278 (cmp:c (minmax:c @0 @1) @0)
2279 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2280 (out @0 @1))))
2281/* MIN (X, 5) == 0 -> X == 0
2282 MIN (X, 5) == 7 -> false */
2283(for cmp (eq ne)
2284 (simplify
2285 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90
RS
2286 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2287 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6 2288 { constant_boolean_node (cmp == NE_EXPR, type); }
8e6cdc90
RS
2289 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2290 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6
MG
2291 (cmp @0 @2)))))
2292(for cmp (eq ne)
2293 (simplify
2294 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90
RS
2295 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2296 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6 2297 { constant_boolean_node (cmp == NE_EXPR, type); }
8e6cdc90
RS
2298 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2299 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6
MG
2300 (cmp @0 @2)))))
2301/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2302(for minmax (min min max max min min max max )
2303 cmp (lt le gt ge gt ge lt le )
2304 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2305 (simplify
2306 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2307 (comb (cmp @0 @2) (cmp @1 @2))))
2308
a7f24614
RB
2309/* Simplifications of shift and rotates. */
2310
2311(for rotate (lrotate rrotate)
2312 (simplify
2313 (rotate integer_all_onesp@0 @1)
2314 @0))
2315
2316/* Optimize -1 >> x for arithmetic right shifts. */
2317(simplify
2318 (rshift integer_all_onesp@0 @1)
2319 (if (!TYPE_UNSIGNED (type)
2320 && tree_expr_nonnegative_p (@1))
2321 @0))
2322
12085390
N
2323/* Optimize (x >> c) << c into x & (-1<<c). */
2324(simplify
2325 (lshift (rshift @0 INTEGER_CST@1) @1)
8e6cdc90 2326 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
12085390
N
2327 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
2328
2329/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2330 types. */
2331(simplify
2332 (rshift (lshift @0 INTEGER_CST@1) @1)
2333 (if (TYPE_UNSIGNED (type)
8e6cdc90 2334 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
12085390
N
2335 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
2336
a7f24614
RB
2337(for shiftrotate (lrotate rrotate lshift rshift)
2338 (simplify
2339 (shiftrotate @0 integer_zerop)
2340 (non_lvalue @0))
2341 (simplify
2342 (shiftrotate integer_zerop@0 @1)
2343 @0)
2344 /* Prefer vector1 << scalar to vector1 << vector2
2345 if vector2 is uniform. */
2346 (for vec (VECTOR_CST CONSTRUCTOR)
2347 (simplify
2348 (shiftrotate @0 vec@1)
2349 (with { tree tem = uniform_vector_p (@1); }
2350 (if (tem)
2351 (shiftrotate @0 { tem; }))))))
2352
165ba2e9
JJ
2353/* Simplify X << Y where Y's low width bits are 0 to X, as only valid
2354 Y is 0. Similarly for X >> Y. */
2355#if GIMPLE
2356(for shift (lshift rshift)
2357 (simplify
2358 (shift @0 SSA_NAME@1)
2359 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2360 (with {
2361 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2362 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2363 }
2364 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2365 @0)))))
2366#endif
2367
a7f24614
RB
2368/* Rewrite an LROTATE_EXPR by a constant into an
2369 RROTATE_EXPR by a new constant. */
2370(simplify
2371 (lrotate @0 INTEGER_CST@1)
23f27839 2372 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
a7f24614
RB
2373 build_int_cst (TREE_TYPE (@1),
2374 element_precision (type)), @1); }))
2375
14ea9f92
RB
2376/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2377(for op (lrotate rrotate rshift lshift)
2378 (simplify
2379 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2380 (with { unsigned int prec = element_precision (type); }
8e6cdc90
RS
2381 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2382 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2383 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2384 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
a1488398
RS
2385 (with { unsigned int low = (tree_to_uhwi (@1)
2386 + tree_to_uhwi (@2)); }
14ea9f92
RB
2387 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2388 being well defined. */
2389 (if (low >= prec)
2390 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
8fdc6c67 2391 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
50301115 2392 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
8fdc6c67
RB
2393 { build_zero_cst (type); }
2394 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2395 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
14ea9f92
RB
2396
2397
01ada710
MP
2398/* ((1 << A) & 1) != 0 -> A == 0
2399 ((1 << A) & 1) == 0 -> A != 0 */
2400(for cmp (ne eq)
2401 icmp (eq ne)
2402 (simplify
2403 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2404 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
cc7b5acf 2405
f2e609c3
MP
2406/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2407 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2408 if CST2 != 0. */
2409(for cmp (ne eq)
2410 (simplify
2411 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
8e6cdc90 2412 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
f2e609c3
MP
2413 (if (cand < 0
2414 || (!integer_zerop (@2)
8e6cdc90 2415 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
8fdc6c67
RB
2416 { constant_boolean_node (cmp == NE_EXPR, type); }
2417 (if (!integer_zerop (@2)
8e6cdc90 2418 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
8fdc6c67 2419 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
f2e609c3 2420
1ffbaa3f
RB
2421/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2422 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2423 if the new mask might be further optimized. */
2424(for shift (lshift rshift)
2425 (simplify
44fc0a51
RB
2426 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2427 INTEGER_CST@2)
1ffbaa3f
RB
2428 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2429 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2430 && tree_fits_uhwi_p (@1)
2431 && tree_to_uhwi (@1) > 0
2432 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2433 (with
2434 {
2435 unsigned int shiftc = tree_to_uhwi (@1);
2436 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2437 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2438 tree shift_type = TREE_TYPE (@3);
2439 unsigned int prec;
2440
2441 if (shift == LSHIFT_EXPR)
fecfbfa4 2442 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
1ffbaa3f 2443 else if (shift == RSHIFT_EXPR
2be65d9e 2444 && type_has_mode_precision_p (shift_type))
1ffbaa3f
RB
2445 {
2446 prec = TYPE_PRECISION (TREE_TYPE (@3));
2447 tree arg00 = @0;
2448 /* See if more bits can be proven as zero because of
2449 zero extension. */
2450 if (@3 != @0
2451 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2452 {
2453 tree inner_type = TREE_TYPE (@0);
2be65d9e 2454 if (type_has_mode_precision_p (inner_type)
1ffbaa3f
RB
2455 && TYPE_PRECISION (inner_type) < prec)
2456 {
2457 prec = TYPE_PRECISION (inner_type);
2458 /* See if we can shorten the right shift. */
2459 if (shiftc < prec)
2460 shift_type = inner_type;
2461 /* Otherwise X >> C1 is all zeros, so we'll optimize
2462 it into (X, 0) later on by making sure zerobits
2463 is all ones. */
2464 }
2465 }
dd4786fe 2466 zerobits = HOST_WIDE_INT_M1U;
1ffbaa3f
RB
2467 if (shiftc < prec)
2468 {
2469 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2470 zerobits <<= prec - shiftc;
2471 }
2472 /* For arithmetic shift if sign bit could be set, zerobits
2473 can contain actually sign bits, so no transformation is
2474 possible, unless MASK masks them all away. In that
2475 case the shift needs to be converted into logical shift. */
2476 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2477 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2478 {
2479 if ((mask & zerobits) == 0)
2480 shift_type = unsigned_type_for (TREE_TYPE (@3));
2481 else
2482 zerobits = 0;
2483 }
2484 }
2485 }
2486 /* ((X << 16) & 0xff00) is (X, 0). */
2487 (if ((mask & zerobits) == mask)
8fdc6c67
RB
2488 { build_int_cst (type, 0); }
2489 (with { newmask = mask | zerobits; }
2490 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2491 (with
2492 {
2493 /* Only do the transformation if NEWMASK is some integer
2494 mode's mask. */
2495 for (prec = BITS_PER_UNIT;
2496 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
fecfbfa4 2497 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
8fdc6c67
RB
2498 break;
2499 }
2500 (if (prec < HOST_BITS_PER_WIDE_INT
dd4786fe 2501 || newmask == HOST_WIDE_INT_M1U)
8fdc6c67
RB
2502 (with
2503 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2504 (if (!tree_int_cst_equal (newmaskt, @2))
2505 (if (shift_type != TREE_TYPE (@3))
2506 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2507 (bit_and @4 { newmaskt; })))))))))))))
1ffbaa3f 2508
84ff66b8
AV
2509/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2510 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
98e30e51 2511(for shift (lshift rshift)
84ff66b8
AV
2512 (for bit_op (bit_and bit_xor bit_ior)
2513 (simplify
2514 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2515 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2516 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2517 (bit_op (shift (convert @0) @1) { mask; }))))))
98e30e51 2518
ad1d92ab
MM
2519/* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2520(simplify
2521 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2522 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
ece46666
MG
2523 && (element_precision (TREE_TYPE (@0))
2524 <= element_precision (TREE_TYPE (@1))
2525 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
ad1d92ab
MM
2526 (with
2527 { tree shift_type = TREE_TYPE (@0); }
2528 (convert (rshift (convert:shift_type @1) @2)))))
2529
2530/* ~(~X >>r Y) -> X >>r Y
2531 ~(~X <<r Y) -> X <<r Y */
2532(for rotate (lrotate rrotate)
2533 (simplify
2534 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
ece46666
MG
2535 (if ((element_precision (TREE_TYPE (@0))
2536 <= element_precision (TREE_TYPE (@1))
2537 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2538 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2539 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
ad1d92ab
MM
2540 (with
2541 { tree rotate_type = TREE_TYPE (@0); }
2542 (convert (rotate (convert:rotate_type @1) @2))))))
98e30e51 2543
d4573ffe
RB
2544/* Simplifications of conversions. */
2545
2546/* Basic strip-useless-type-conversions / strip_nops. */
f3582e54 2547(for cvt (convert view_convert float fix_trunc)
d4573ffe
RB
2548 (simplify
2549 (cvt @0)
2550 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2551 || (GENERIC && type == TREE_TYPE (@0)))
2552 @0)))
2553
2554/* Contract view-conversions. */
2555(simplify
2556 (view_convert (view_convert @0))
2557 (view_convert @0))
2558
2559/* For integral conversions with the same precision or pointer
2560 conversions use a NOP_EXPR instead. */
2561(simplify
2562 (view_convert @0)
2563 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2564 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2565 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2566 (convert @0)))
2567
bce8ef71
MG
2568/* Strip inner integral conversions that do not change precision or size, or
2569 zero-extend while keeping the same size (for bool-to-char). */
d4573ffe
RB
2570(simplify
2571 (view_convert (convert@0 @1))
2572 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2573 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
bce8ef71
MG
2574 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2575 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2576 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2577 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
d4573ffe
RB
2578 (view_convert @1)))
2579
2580/* Re-association barriers around constants and other re-association
2581 barriers can be removed. */
2582(simplify
2583 (paren CONSTANT_CLASS_P@0)
2584 @0)
2585(simplify
2586 (paren (paren@1 @0))
2587 @1)
1e51d0a2
RB
2588
2589/* Handle cases of two conversions in a row. */
2590(for ocvt (convert float fix_trunc)
2591 (for icvt (convert float)
2592 (simplify
2593 (ocvt (icvt@1 @0))
2594 (with
2595 {
2596 tree inside_type = TREE_TYPE (@0);
2597 tree inter_type = TREE_TYPE (@1);
2598 int inside_int = INTEGRAL_TYPE_P (inside_type);
2599 int inside_ptr = POINTER_TYPE_P (inside_type);
2600 int inside_float = FLOAT_TYPE_P (inside_type);
09240451 2601 int inside_vec = VECTOR_TYPE_P (inside_type);
1e51d0a2
RB
2602 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2603 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2604 int inter_int = INTEGRAL_TYPE_P (inter_type);
2605 int inter_ptr = POINTER_TYPE_P (inter_type);
2606 int inter_float = FLOAT_TYPE_P (inter_type);
09240451 2607 int inter_vec = VECTOR_TYPE_P (inter_type);
1e51d0a2
RB
2608 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2609 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2610 int final_int = INTEGRAL_TYPE_P (type);
2611 int final_ptr = POINTER_TYPE_P (type);
2612 int final_float = FLOAT_TYPE_P (type);
09240451 2613 int final_vec = VECTOR_TYPE_P (type);
1e51d0a2
RB
2614 unsigned int final_prec = TYPE_PRECISION (type);
2615 int final_unsignedp = TYPE_UNSIGNED (type);
2616 }
64d3a1f0
RB
2617 (switch
2618 /* In addition to the cases of two conversions in a row
2619 handled below, if we are converting something to its own
2620 type via an object of identical or wider precision, neither
2621 conversion is needed. */
2622 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2623 || (GENERIC
2624 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2625 && (((inter_int || inter_ptr) && final_int)
2626 || (inter_float && final_float))
2627 && inter_prec >= final_prec)
2628 (ocvt @0))
2629
2630 /* Likewise, if the intermediate and initial types are either both
2631 float or both integer, we don't need the middle conversion if the
2632 former is wider than the latter and doesn't change the signedness
2633 (for integers). Avoid this if the final type is a pointer since
36088299 2634 then we sometimes need the middle conversion. */
64d3a1f0
RB
2635 (if (((inter_int && inside_int) || (inter_float && inside_float))
2636 && (final_int || final_float)
2637 && inter_prec >= inside_prec
36088299 2638 && (inter_float || inter_unsignedp == inside_unsignedp))
64d3a1f0
RB
2639 (ocvt @0))
2640
2641 /* If we have a sign-extension of a zero-extended value, we can
2642 replace that by a single zero-extension. Likewise if the
2643 final conversion does not change precision we can drop the
2644 intermediate conversion. */
2645 (if (inside_int && inter_int && final_int
2646 && ((inside_prec < inter_prec && inter_prec < final_prec
2647 && inside_unsignedp && !inter_unsignedp)
2648 || final_prec == inter_prec))
2649 (ocvt @0))
2650
2651 /* Two conversions in a row are not needed unless:
1e51d0a2
RB
2652 - some conversion is floating-point (overstrict for now), or
2653 - some conversion is a vector (overstrict for now), or
2654 - the intermediate type is narrower than both initial and
2655 final, or
2656 - the intermediate type and innermost type differ in signedness,
2657 and the outermost type is wider than the intermediate, or
2658 - the initial type is a pointer type and the precisions of the
2659 intermediate and final types differ, or
2660 - the final type is a pointer type and the precisions of the
2661 initial and intermediate types differ. */
64d3a1f0
RB
2662 (if (! inside_float && ! inter_float && ! final_float
2663 && ! inside_vec && ! inter_vec && ! final_vec
2664 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2665 && ! (inside_int && inter_int
2666 && inter_unsignedp != inside_unsignedp
2667 && inter_prec < final_prec)
2668 && ((inter_unsignedp && inter_prec > inside_prec)
2669 == (final_unsignedp && final_prec > inter_prec))
2670 && ! (inside_ptr && inter_prec != final_prec)
36088299 2671 && ! (final_ptr && inside_prec != inter_prec))
64d3a1f0
RB
2672 (ocvt @0))
2673
2674 /* A truncation to an unsigned type (a zero-extension) should be
2675 canonicalized as bitwise and of a mask. */
1d510e04
JJ
2676 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2677 && final_int && inter_int && inside_int
64d3a1f0
RB
2678 && final_prec == inside_prec
2679 && final_prec > inter_prec
2680 && inter_unsignedp)
2681 (convert (bit_and @0 { wide_int_to_tree
2682 (inside_type,
2683 wi::mask (inter_prec, false,
2684 TYPE_PRECISION (inside_type))); })))
2685
2686 /* If we are converting an integer to a floating-point that can
2687 represent it exactly and back to an integer, we can skip the
2688 floating-point conversion. */
2689 (if (GIMPLE /* PR66211 */
2690 && inside_int && inter_float && final_int &&
2691 (unsigned) significand_size (TYPE_MODE (inter_type))
2692 >= inside_prec - !inside_unsignedp)
2693 (convert @0)))))))
ea2042ba
RB
2694
2695/* If we have a narrowing conversion to an integral type that is fed by a
2696 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2697 masks off bits outside the final type (and nothing else). */
2698(simplify
2699 (convert (bit_and @0 INTEGER_CST@1))
2700 (if (INTEGRAL_TYPE_P (type)
2701 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2702 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2703 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2704 TYPE_PRECISION (type)), 0))
2705 (convert @0)))
a25454ea
RB
2706
2707
2708/* (X /[ex] A) * A -> X. */
2709(simplify
2eef1fc1
RB
2710 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2711 (convert @0))
eaeba53a 2712
0036218b
MG
2713/* ((X /[ex] A) +- B) * A --> X +- A * B. */
2714(for op (plus minus)
2715 (simplify
2716 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
2717 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
2718 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
2719 (with
2720 {
2721 wi::overflow_type overflow;
2722 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
2723 TYPE_SIGN (type), &overflow);
2724 }
2725 (if (types_match (type, TREE_TYPE (@2))
2726 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
2727 (op @0 { wide_int_to_tree (type, mul); })
2728 (with { tree utype = unsigned_type_for (type); }
2729 (convert (op (convert:utype @0)
2730 (mult (convert:utype @1) (convert:utype @2))))))))))
2731
a7f24614
RB
2732/* Canonicalization of binary operations. */
2733
2734/* Convert X + -C into X - C. */
2735(simplify
2736 (plus @0 REAL_CST@1)
2737 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
23f27839 2738 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
a7f24614
RB
2739 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2740 (minus @0 { tem; })))))
2741
6b6aa8d3 2742/* Convert x+x into x*2. */
a7f24614
RB
2743(simplify
2744 (plus @0 @0)
2745 (if (SCALAR_FLOAT_TYPE_P (type))
6b6aa8d3
MG
2746 (mult @0 { build_real (type, dconst2); })
2747 (if (INTEGRAL_TYPE_P (type))
2748 (mult @0 { build_int_cst (type, 2); }))))
a7f24614 2749
406520e2 2750/* 0 - X -> -X. */
a7f24614
RB
2751(simplify
2752 (minus integer_zerop @1)
2753 (negate @1))
406520e2
MG
2754(simplify
2755 (pointer_diff integer_zerop @1)
2756 (negate (convert @1)))
a7f24614
RB
2757
2758/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2759 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2760 (-ARG1 + ARG0) reduces to -ARG1. */
2761(simplify
2762 (minus real_zerop@0 @1)
2763 (if (fold_real_zero_addition_p (type, @0, 0))
2764 (negate @1)))
2765
2766/* Transform x * -1 into -x. */
2767(simplify
2768 (mult @0 integer_minus_onep)
2769 (negate @0))
eaeba53a 2770
b771c609
AM
2771/* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2772 signed overflow for CST != 0 && CST != -1. */
2773(simplify
b46ebc6c 2774 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
b771c609 2775 (if (TREE_CODE (@2) != INTEGER_CST
b46ebc6c 2776 && single_use (@3)
b771c609
AM
2777 && !integer_zerop (@1) && !integer_minus_onep (@1))
2778 (mult (mult @0 @2) @1)))
2779
96285749
RS
2780/* True if we can easily extract the real and imaginary parts of a complex
2781 number. */
2782(match compositional_complex
2783 (convert? (complex @0 @1)))
2784
eaeba53a
RB
2785/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2786(simplify
2787 (complex (realpart @0) (imagpart @0))
2788 @0)
2789(simplify
2790 (realpart (complex @0 @1))
2791 @0)
2792(simplify
2793 (imagpart (complex @0 @1))
2794 @1)
83633539 2795
77c028c5
MG
2796/* Sometimes we only care about half of a complex expression. */
2797(simplify
2798 (realpart (convert?:s (conj:s @0)))
2799 (convert (realpart @0)))
2800(simplify
2801 (imagpart (convert?:s (conj:s @0)))
2802 (convert (negate (imagpart @0))))
2803(for part (realpart imagpart)
2804 (for op (plus minus)
2805 (simplify
2806 (part (convert?:s@2 (op:s @0 @1)))
2807 (convert (op (part @0) (part @1))))))
2808(simplify
2809 (realpart (convert?:s (CEXPI:s @0)))
2810 (convert (COS @0)))
2811(simplify
2812 (imagpart (convert?:s (CEXPI:s @0)))
2813 (convert (SIN @0)))
2814
2815/* conj(conj(x)) -> x */
2816(simplify
2817 (conj (convert? (conj @0)))
2818 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2819 (convert @0)))
2820
2821/* conj({x,y}) -> {x,-y} */
2822(simplify
2823 (conj (convert?:s (complex:s @0 @1)))
2824 (with { tree itype = TREE_TYPE (type); }
2825 (complex (convert:itype @0) (negate (convert:itype @1)))))
83633539
RB
2826
2827/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2828(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2829 (simplify
2830 (bswap (bswap @0))
2831 @0)
2832 (simplify
2833 (bswap (bit_not (bswap @0)))
2834 (bit_not @0))
2835 (for bitop (bit_xor bit_ior bit_and)
2836 (simplify
2837 (bswap (bitop:c (bswap @0) @1))
2838 (bitop @0 (bswap @1)))))
96994de0
RB
2839
2840
2841/* Combine COND_EXPRs and VEC_COND_EXPRs. */
2842
2843/* Simplify constant conditions.
2844 Only optimize constant conditions when the selected branch
2845 has the same type as the COND_EXPR. This avoids optimizing
2846 away "c ? x : throw", where the throw has a void type.
2847 Note that we cannot throw away the fold-const.c variant nor
2848 this one as we depend on doing this transform before possibly
2849 A ? B : B -> B triggers and the fold-const.c one can optimize
2850 0 ? A : B to B even if A has side-effects. Something
2851 genmatch cannot handle. */
2852(simplify
2853 (cond INTEGER_CST@0 @1 @2)
8fdc6c67
RB
2854 (if (integer_zerop (@0))
2855 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2856 @2)
2857 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2858 @1)))
96994de0
RB
2859(simplify
2860 (vec_cond VECTOR_CST@0 @1 @2)
2861 (if (integer_all_onesp (@0))
8fdc6c67
RB
2862 @1
2863 (if (integer_zerop (@0))
2864 @2)))
96994de0 2865
b5481987
BC
2866/* Simplification moved from fold_cond_expr_with_comparison. It may also
2867 be extended. */
e2535011
BC
2868/* This pattern implements two kinds simplification:
2869
2870 Case 1)
2871 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
b5481987
BC
2872 1) Conversions are type widening from smaller type.
2873 2) Const c1 equals to c2 after canonicalizing comparison.
2874 3) Comparison has tree code LT, LE, GT or GE.
2875 This specific pattern is needed when (cmp (convert x) c) may not
2876 be simplified by comparison patterns because of multiple uses of
2877 x. It also makes sense here because simplifying across multiple
e2535011
BC
2878 referred var is always benefitial for complicated cases.
2879
2880 Case 2)
2881 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2882(for cmp (lt le gt ge eq)
b5481987 2883 (simplify
ae22bc5d 2884 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
b5481987
BC
2885 (with
2886 {
2887 tree from_type = TREE_TYPE (@1);
2888 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
ae22bc5d 2889 enum tree_code code = ERROR_MARK;
b5481987 2890
ae22bc5d
BC
2891 if (INTEGRAL_TYPE_P (from_type)
2892 && int_fits_type_p (@2, from_type)
b5481987
BC
2893 && (types_match (c1_type, from_type)
2894 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2895 && (TYPE_UNSIGNED (from_type)
2896 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2897 && (types_match (c2_type, from_type)
2898 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2899 && (TYPE_UNSIGNED (from_type)
2900 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2901 {
ae22bc5d 2902 if (cmp != EQ_EXPR)
b5481987 2903 {
e2535011
BC
2904 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2905 {
2906 /* X <= Y - 1 equals to X < Y. */
ae22bc5d 2907 if (cmp == LE_EXPR)
e2535011
BC
2908 code = LT_EXPR;
2909 /* X > Y - 1 equals to X >= Y. */
ae22bc5d 2910 if (cmp == GT_EXPR)
e2535011
BC
2911 code = GE_EXPR;
2912 }
2913 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2914 {
2915 /* X < Y + 1 equals to X <= Y. */
ae22bc5d 2916 if (cmp == LT_EXPR)
e2535011
BC
2917 code = LE_EXPR;
2918 /* X >= Y + 1 equals to X > Y. */
ae22bc5d 2919 if (cmp == GE_EXPR)
e2535011
BC
2920 code = GT_EXPR;
2921 }
ae22bc5d
BC
2922 if (code != ERROR_MARK
2923 || wi::to_widest (@2) == wi::to_widest (@3))
e2535011 2924 {
ae22bc5d 2925 if (cmp == LT_EXPR || cmp == LE_EXPR)
e2535011 2926 code = MIN_EXPR;
ae22bc5d 2927 if (cmp == GT_EXPR || cmp == GE_EXPR)
e2535011
BC
2928 code = MAX_EXPR;
2929 }
b5481987 2930 }
e2535011 2931 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
ae22bc5d
BC
2932 else if (int_fits_type_p (@3, from_type))
2933 code = EQ_EXPR;
b5481987
BC
2934 }
2935 }
2936 (if (code == MAX_EXPR)
21aaaf1e 2937 (convert (max @1 (convert @2)))
b5481987 2938 (if (code == MIN_EXPR)
21aaaf1e 2939 (convert (min @1 (convert @2)))
e2535011 2940 (if (code == EQ_EXPR)
ae22bc5d 2941 (convert (cond (eq @1 (convert @3))
21aaaf1e 2942 (convert:from_type @3) (convert:from_type @2)))))))))
b5481987 2943
714445ae
BC
2944/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2945
2946 1) OP is PLUS or MINUS.
2947 2) CMP is LT, LE, GT or GE.
2948 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2949
2950 This pattern also handles special cases like:
2951
2952 A) Operand x is a unsigned to signed type conversion and c1 is
2953 integer zero. In this case,
2954 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2955 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2956 B) Const c1 may not equal to (C3 op' C2). In this case we also
2957 check equality for (c1+1) and (c1-1) by adjusting comparison
2958 code.
2959
2960 TODO: Though signed type is handled by this pattern, it cannot be
2961 simplified at the moment because C standard requires additional
2962 type promotion. In order to match&simplify it here, the IR needs
2963 to be cleaned up by other optimizers, i.e, VRP. */
2964(for op (plus minus)
2965 (for cmp (lt le gt ge)
2966 (simplify
2967 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2968 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2969 (if (types_match (from_type, to_type)
2970 /* Check if it is special case A). */
2971 || (TYPE_UNSIGNED (from_type)
2972 && !TYPE_UNSIGNED (to_type)
2973 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2974 && integer_zerop (@1)
2975 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2976 (with
2977 {
4a669ac3 2978 wi::overflow_type overflow = wi::OVF_NONE;
714445ae 2979 enum tree_code code, cmp_code = cmp;
8e6cdc90
RS
2980 wide_int real_c1;
2981 wide_int c1 = wi::to_wide (@1);
2982 wide_int c2 = wi::to_wide (@2);
2983 wide_int c3 = wi::to_wide (@3);
714445ae
BC
2984 signop sgn = TYPE_SIGN (from_type);
2985
2986 /* Handle special case A), given x of unsigned type:
2987 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2988 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2989 if (!types_match (from_type, to_type))
2990 {
2991 if (cmp_code == LT_EXPR)
2992 cmp_code = GT_EXPR;
2993 if (cmp_code == GE_EXPR)
2994 cmp_code = LE_EXPR;
2995 c1 = wi::max_value (to_type);
2996 }
2997 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2998 compute (c3 op' c2) and check if it equals to c1 with op' being
2999 the inverted operator of op. Make sure overflow doesn't happen
3000 if it is undefined. */
3001 if (op == PLUS_EXPR)
3002 real_c1 = wi::sub (c3, c2, sgn, &overflow);
3003 else
3004 real_c1 = wi::add (c3, c2, sgn, &overflow);
3005
3006 code = cmp_code;
3007 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3008 {
3009 /* Check if c1 equals to real_c1. Boundary condition is handled
3010 by adjusting comparison operation if necessary. */
3011 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3012 && !overflow)
3013 {
3014 /* X <= Y - 1 equals to X < Y. */
3015 if (cmp_code == LE_EXPR)
3016 code = LT_EXPR;
3017 /* X > Y - 1 equals to X >= Y. */
3018 if (cmp_code == GT_EXPR)
3019 code = GE_EXPR;
3020 }
3021 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3022 && !overflow)
3023 {
3024 /* X < Y + 1 equals to X <= Y. */
3025 if (cmp_code == LT_EXPR)
3026 code = LE_EXPR;
3027 /* X >= Y + 1 equals to X > Y. */
3028 if (cmp_code == GE_EXPR)
3029 code = GT_EXPR;
3030 }
3031 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3032 {
3033 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3034 code = MIN_EXPR;
3035 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3036 code = MAX_EXPR;
3037 }
3038 }
3039 }
3040 (if (code == MAX_EXPR)
3041 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3042 { wide_int_to_tree (from_type, c2); })
3043 (if (code == MIN_EXPR)
3044 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3045 { wide_int_to_tree (from_type, c2); })))))))))
3046
96994de0
RB
3047(for cnd (cond vec_cond)
3048 /* A ? B : (A ? X : C) -> A ? B : C. */
3049 (simplify
3050 (cnd @0 (cnd @0 @1 @2) @3)
3051 (cnd @0 @1 @3))
3052 (simplify
3053 (cnd @0 @1 (cnd @0 @2 @3))
3054 (cnd @0 @1 @3))
24a179f8
RB
3055 /* A ? B : (!A ? C : X) -> A ? B : C. */
3056 /* ??? This matches embedded conditions open-coded because genmatch
3057 would generate matching code for conditions in separate stmts only.
3058 The following is still important to merge then and else arm cases
3059 from if-conversion. */
3060 (simplify
3061 (cnd @0 @1 (cnd @2 @3 @4))
2c58d42c 3062 (if (inverse_conditions_p (@0, @2))
24a179f8
RB
3063 (cnd @0 @1 @3)))
3064 (simplify
3065 (cnd @0 (cnd @1 @2 @3) @4)
2c58d42c 3066 (if (inverse_conditions_p (@0, @1))
24a179f8 3067 (cnd @0 @3 @4)))
96994de0
RB
3068
3069 /* A ? B : B -> B. */
3070 (simplify
3071 (cnd @0 @1 @1)
09240451 3072 @1)
96994de0 3073
09240451
MG
3074 /* !A ? B : C -> A ? C : B. */
3075 (simplify
3076 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3077 (cnd @0 @2 @1)))
f84e7fd6 3078
a3ca1bc5
RB
3079/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3080 return all -1 or all 0 results. */
f43d102e
RS
3081/* ??? We could instead convert all instances of the vec_cond to negate,
3082 but that isn't necessarily a win on its own. */
3083(simplify
a3ca1bc5 3084 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 3085 (if (VECTOR_TYPE_P (type)
928686b1
RS
3086 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3087 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
f43d102e 3088 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 3089 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 3090 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f43d102e 3091
a3ca1bc5 3092/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
f43d102e 3093(simplify
a3ca1bc5 3094 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 3095 (if (VECTOR_TYPE_P (type)
928686b1
RS
3096 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3097 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
f43d102e 3098 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 3099 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 3100 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f84e7fd6 3101
2ee05f1e 3102
f84e7fd6
RB
3103/* Simplifications of comparisons. */
3104
24f1db9c
RB
3105/* See if we can reduce the magnitude of a constant involved in a
3106 comparison by changing the comparison code. This is a canonicalization
3107 formerly done by maybe_canonicalize_comparison_1. */
3108(for cmp (le gt)
3109 acmp (lt ge)
3110 (simplify
f06e47d7
JJ
3111 (cmp @0 uniform_integer_cst_p@1)
3112 (with { tree cst = uniform_integer_cst_p (@1); }
3113 (if (tree_int_cst_sgn (cst) == -1)
3114 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3115 wide_int_to_tree (TREE_TYPE (cst),
3116 wi::to_wide (cst)
3117 + 1)); })))))
24f1db9c
RB
3118(for cmp (ge lt)
3119 acmp (gt le)
3120 (simplify
f06e47d7
JJ
3121 (cmp @0 uniform_integer_cst_p@1)
3122 (with { tree cst = uniform_integer_cst_p (@1); }
3123 (if (tree_int_cst_sgn (cst) == 1)
3124 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3125 wide_int_to_tree (TREE_TYPE (cst),
3126 wi::to_wide (cst) - 1)); })))))
24f1db9c 3127
f84e7fd6
RB
3128/* We can simplify a logical negation of a comparison to the
3129 inverted comparison. As we cannot compute an expression
3130 operator using invert_tree_comparison we have to simulate
3131 that with expression code iteration. */
3132(for cmp (tcc_comparison)
3133 icmp (inverted_tcc_comparison)
3134 ncmp (inverted_tcc_comparison_with_nans)
3135 /* Ideally we'd like to combine the following two patterns
3136 and handle some more cases by using
3137 (logical_inverted_value (cmp @0 @1))
3138 here but for that genmatch would need to "inline" that.
3139 For now implement what forward_propagate_comparison did. */
3140 (simplify
3141 (bit_not (cmp @0 @1))
3142 (if (VECTOR_TYPE_P (type)
3143 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3144 /* Comparison inversion may be impossible for trapping math,
3145 invert_tree_comparison will tell us. But we can't use
3146 a computed operator in the replacement tree thus we have
3147 to play the trick below. */
3148 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 3149 (cmp, HONOR_NANS (@0)); }
f84e7fd6 3150 (if (ic == icmp)
8fdc6c67
RB
3151 (icmp @0 @1)
3152 (if (ic == ncmp)
3153 (ncmp @0 @1))))))
f84e7fd6 3154 (simplify
09240451
MG
3155 (bit_xor (cmp @0 @1) integer_truep)
3156 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 3157 (cmp, HONOR_NANS (@0)); }
09240451 3158 (if (ic == icmp)
8fdc6c67
RB
3159 (icmp @0 @1)
3160 (if (ic == ncmp)
3161 (ncmp @0 @1))))))
e18c1d66 3162
2ee05f1e
RB
3163/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3164 ??? The transformation is valid for the other operators if overflow
3165 is undefined for the type, but performing it here badly interacts
3166 with the transformation in fold_cond_expr_with_comparison which
3167 attempts to synthetize ABS_EXPR. */
3168(for cmp (eq ne)
1af4ebf5
MG
3169 (for sub (minus pointer_diff)
3170 (simplify
3171 (cmp (sub@2 @0 @1) integer_zerop)
3172 (if (single_use (@2))
3173 (cmp @0 @1)))))
2ee05f1e
RB
3174
3175/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3176 signed arithmetic case. That form is created by the compiler
3177 often enough for folding it to be of value. One example is in
3178 computing loop trip counts after Operator Strength Reduction. */
07cdc2b8
RB
3179(for cmp (simple_comparison)
3180 scmp (swapped_simple_comparison)
2ee05f1e 3181 (simplify
bc6e9db4 3182 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2ee05f1e
RB
3183 /* Handle unfolded multiplication by zero. */
3184 (if (integer_zerop (@1))
8fdc6c67
RB
3185 (cmp @1 @2)
3186 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
bc6e9db4
RB
3187 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3188 && single_use (@3))
8fdc6c67
RB
3189 /* If @1 is negative we swap the sense of the comparison. */
3190 (if (tree_int_cst_sgn (@1) < 0)
3191 (scmp @0 @2)
3192 (cmp @0 @2))))))
03cc70b5 3193
2ee05f1e
RB
3194/* Simplify comparison of something with itself. For IEEE
3195 floating-point, we can only do some of these simplifications. */
287f8f17 3196(for cmp (eq ge le)
2ee05f1e
RB
3197 (simplify
3198 (cmp @0 @0)
287f8f17 3199 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 3200 || ! HONOR_NANS (@0))
287f8f17
RB
3201 { constant_boolean_node (true, type); }
3202 (if (cmp != EQ_EXPR)
3203 (eq @0 @0)))))
2ee05f1e
RB
3204(for cmp (ne gt lt)
3205 (simplify
3206 (cmp @0 @0)
3207 (if (cmp != NE_EXPR
3208 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 3209 || ! HONOR_NANS (@0))
2ee05f1e 3210 { constant_boolean_node (false, type); })))
b5d3d787
RB
3211(for cmp (unle unge uneq)
3212 (simplify
3213 (cmp @0 @0)
3214 { constant_boolean_node (true, type); }))
dd53d197
MG
3215(for cmp (unlt ungt)
3216 (simplify
3217 (cmp @0 @0)
3218 (unordered @0 @0)))
b5d3d787
RB
3219(simplify
3220 (ltgt @0 @0)
3221 (if (!flag_trapping_math)
3222 { constant_boolean_node (false, type); }))
2ee05f1e
RB
3223
3224/* Fold ~X op ~Y as Y op X. */
07cdc2b8 3225(for cmp (simple_comparison)
2ee05f1e 3226 (simplify
7fe996ba
RB
3227 (cmp (bit_not@2 @0) (bit_not@3 @1))
3228 (if (single_use (@2) && single_use (@3))
3229 (cmp @1 @0))))
2ee05f1e
RB
3230
3231/* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
07cdc2b8
RB
3232(for cmp (simple_comparison)
3233 scmp (swapped_simple_comparison)
2ee05f1e 3234 (simplify
7fe996ba
RB
3235 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3236 (if (single_use (@2)
3237 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2ee05f1e
RB
3238 (scmp @0 (bit_not @1)))))
3239
07cdc2b8
RB
3240(for cmp (simple_comparison)
3241 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3242 (simplify
3243 (cmp (convert@2 @0) (convert? @1))
3244 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3245 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3246 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3247 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3248 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3249 (with
3250 {
3251 tree type1 = TREE_TYPE (@1);
3252 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3253 {
3254 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3255 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3256 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3257 type1 = float_type_node;
3258 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3259 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3260 type1 = double_type_node;
3261 }
3262 tree newtype
3263 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
03cc70b5 3264 ? TREE_TYPE (@0) : type1);
07cdc2b8
RB
3265 }
3266 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3267 (cmp (convert:newtype @0) (convert:newtype @1))))))
03cc70b5 3268
07cdc2b8
RB
3269 (simplify
3270 (cmp @0 REAL_CST@1)
3271 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
64d3a1f0
RB
3272 (switch
3273 /* a CMP (-0) -> a CMP 0 */
3274 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3275 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3276 /* x != NaN is always true, other ops are always false. */
3277 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3278 && ! HONOR_SNANS (@1))
3279 { constant_boolean_node (cmp == NE_EXPR, type); })
3280 /* Fold comparisons against infinity. */
3281 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3282 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3283 (with
3284 {
3285 REAL_VALUE_TYPE max;
3286 enum tree_code code = cmp;
3287 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3288 if (neg)
3289 code = swap_tree_comparison (code);
3290 }
3291 (switch
e96a5786 3292 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
64d3a1f0 3293 (if (code == GT_EXPR
e96a5786 3294 && !(HONOR_NANS (@0) && flag_trapping_math))
64d3a1f0
RB
3295 { constant_boolean_node (false, type); })
3296 (if (code == LE_EXPR)
e96a5786 3297 /* x <= +Inf is always true, if we don't care about NaNs. */
64d3a1f0
RB
3298 (if (! HONOR_NANS (@0))
3299 { constant_boolean_node (true, type); }
e96a5786
JM
3300 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3301 an "invalid" exception. */
3302 (if (!flag_trapping_math)
3303 (eq @0 @0))))
3304 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3305 for == this introduces an exception for x a NaN. */
3306 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3307 || code == GE_EXPR)
64d3a1f0
RB
3308 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3309 (if (neg)
3310 (lt @0 { build_real (TREE_TYPE (@0), max); })
3311 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3312 /* x < +Inf is always equal to x <= DBL_MAX. */
3313 (if (code == LT_EXPR)
3314 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3315 (if (neg)
3316 (ge @0 { build_real (TREE_TYPE (@0), max); })
3317 (le @0 { build_real (TREE_TYPE (@0), max); }))))
e96a5786
JM
3318 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3319 an exception for x a NaN so use an unordered comparison. */
64d3a1f0
RB
3320 (if (code == NE_EXPR)
3321 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3322 (if (! HONOR_NANS (@0))
3323 (if (neg)
3324 (ge @0 { build_real (TREE_TYPE (@0), max); })
3325 (le @0 { build_real (TREE_TYPE (@0), max); }))
3326 (if (neg)
e96a5786
JM
3327 (unge @0 { build_real (TREE_TYPE (@0), max); })
3328 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
07cdc2b8
RB
3329
3330 /* If this is a comparison of a real constant with a PLUS_EXPR
3331 or a MINUS_EXPR of a real constant, we can convert it into a
3332 comparison with a revised real constant as long as no overflow
3333 occurs when unsafe_math_optimizations are enabled. */
3334 (if (flag_unsafe_math_optimizations)
3335 (for op (plus minus)
3336 (simplify
3337 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3338 (with
3339 {
3340 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3341 TREE_TYPE (@1), @2, @1);
3342 }
f980c9a2 3343 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
3344 (cmp @0 { tem; }))))))
3345
3346 /* Likewise, we can simplify a comparison of a real constant with
3347 a MINUS_EXPR whose first operand is also a real constant, i.e.
3348 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3349 floating-point types only if -fassociative-math is set. */
3350 (if (flag_associative_math)
3351 (simplify
0409237b 3352 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
07cdc2b8 3353 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
f980c9a2 3354 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
3355 (cmp { tem; } @1)))))
3356
3357 /* Fold comparisons against built-in math functions. */
3358 (if (flag_unsafe_math_optimizations
3359 && ! flag_errno_math)
3360 (for sq (SQRT)
3361 (simplify
3362 (cmp (sq @0) REAL_CST@1)
64d3a1f0
RB
3363 (switch
3364 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3365 (switch
3366 /* sqrt(x) < y is always false, if y is negative. */
3367 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
8fdc6c67 3368 { constant_boolean_node (false, type); })
64d3a1f0
RB
3369 /* sqrt(x) > y is always true, if y is negative and we
3370 don't care about NaNs, i.e. negative values of x. */
3371 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3372 { constant_boolean_node (true, type); })
3373 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3374 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
c53233c6
RS
3375 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3376 (switch
3377 /* sqrt(x) < 0 is always false. */
3378 (if (cmp == LT_EXPR)
3379 { constant_boolean_node (false, type); })
3380 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3381 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3382 { constant_boolean_node (true, type); })
3383 /* sqrt(x) <= 0 -> x == 0. */
3384 (if (cmp == LE_EXPR)
3385 (eq @0 @1))
3386 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3387 == or !=. In the last case:
3388
3389 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3390
3391 if x is negative or NaN. Due to -funsafe-math-optimizations,
3392 the results for other x follow from natural arithmetic. */
3393 (cmp @0 @1)))
64d3a1f0
RB
3394 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3395 (with
3396 {
3397 REAL_VALUE_TYPE c2;
5c88ea94
RS
3398 real_arithmetic (&c2, MULT_EXPR,
3399 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
3400 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3401 }
3402 (if (REAL_VALUE_ISINF (c2))
3403 /* sqrt(x) > y is x == +Inf, when y is very large. */
3404 (if (HONOR_INFINITIES (@0))
3405 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3406 { constant_boolean_node (false, type); })
3407 /* sqrt(x) > c is the same as x > c*c. */
3408 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3409 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3410 (with
3411 {
3412 REAL_VALUE_TYPE c2;
5c88ea94
RS
3413 real_arithmetic (&c2, MULT_EXPR,
3414 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
3415 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3416 }
3417 (if (REAL_VALUE_ISINF (c2))
3418 (switch
3419 /* sqrt(x) < y is always true, when y is a very large
3420 value and we don't care about NaNs or Infinities. */
3421 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3422 { constant_boolean_node (true, type); })
3423 /* sqrt(x) < y is x != +Inf when y is very large and we
3424 don't care about NaNs. */
3425 (if (! HONOR_NANS (@0))
3426 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3427 /* sqrt(x) < y is x >= 0 when y is very large and we
3428 don't care about Infinities. */
3429 (if (! HONOR_INFINITIES (@0))
3430 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3431 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3432 (if (GENERIC)
3433 (truth_andif
3434 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3435 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3436 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3437 (if (! HONOR_NANS (@0))
3438 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3439 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3440 (if (GENERIC)
3441 (truth_andif
3442 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
0ca2e7f7
PK
3443 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3444 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3445 (simplify
3446 (cmp (sq @0) (sq @1))
3447 (if (! HONOR_NANS (@0))
3448 (cmp @0 @1))))))
2ee05f1e 3449
e41ec71b 3450/* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
f3842847
YG
3451(for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
3452 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
e41ec71b
YG
3453 (simplify
3454 (cmp (float@0 @1) (float @2))
3455 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
3456 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3457 (with
3458 {
3459 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
3460 tree type1 = TREE_TYPE (@1);
3461 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
3462 tree type2 = TREE_TYPE (@2);
3463 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
3464 }
3465 (if (fmt.can_represent_integral_type_p (type1)
3466 && fmt.can_represent_integral_type_p (type2))
f3842847
YG
3467 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
3468 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
3469 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
3470 && type1_signed_p >= type2_signed_p)
3471 (icmp @1 (convert @2))
3472 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
3473 && type1_signed_p <= type2_signed_p)
3474 (icmp (convert:type2 @1) @2)
3475 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
3476 && type1_signed_p == type2_signed_p)
3477 (icmp @1 @2))))))))))
e41ec71b 3478
c779bea5
YG
3479/* Optimize various special cases of (FTYPE) N CMP CST. */
3480(for cmp (lt le eq ne ge gt)
3481 icmp (le le eq ne ge ge)
3482 (simplify
3483 (cmp (float @0) REAL_CST@1)
3484 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3485 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3486 (with
3487 {
3488 tree itype = TREE_TYPE (@0);
c779bea5
YG
3489 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3490 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3491 /* Be careful to preserve any potential exceptions due to
3492 NaNs. qNaNs are ok in == or != context.
3493 TODO: relax under -fno-trapping-math or
3494 -fno-signaling-nans. */
3495 bool exception_p
3496 = real_isnan (cst) && (cst->signalling
c651dca2 3497 || (cmp != EQ_EXPR && cmp != NE_EXPR));
c779bea5
YG
3498 }
3499 /* TODO: allow non-fitting itype and SNaNs when
3500 -fno-trapping-math. */
e41ec71b 3501 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
c779bea5
YG
3502 (with
3503 {
e41ec71b 3504 signop isign = TYPE_SIGN (itype);
c779bea5
YG
3505 REAL_VALUE_TYPE imin, imax;
3506 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3507 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3508
3509 REAL_VALUE_TYPE icst;
3510 if (cmp == GT_EXPR || cmp == GE_EXPR)
3511 real_ceil (&icst, fmt, cst);
3512 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3513 real_floor (&icst, fmt, cst);
3514 else
3515 real_trunc (&icst, fmt, cst);
3516
b09bf97b 3517 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
c779bea5
YG
3518
3519 bool overflow_p = false;
3520 wide_int icst_val
3521 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3522 }
3523 (switch
3524 /* Optimize cases when CST is outside of ITYPE's range. */
3525 (if (real_compare (LT_EXPR, cst, &imin))
3526 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3527 type); })
3528 (if (real_compare (GT_EXPR, cst, &imax))
3529 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3530 type); })
3531 /* Remove cast if CST is an integer representable by ITYPE. */
3532 (if (cst_int_p)
3533 (cmp @0 { gcc_assert (!overflow_p);
3534 wide_int_to_tree (itype, icst_val); })
3535 )
3536 /* When CST is fractional, optimize
3537 (FTYPE) N == CST -> 0
3538 (FTYPE) N != CST -> 1. */
3539 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
03cc70b5 3540 { constant_boolean_node (cmp == NE_EXPR, type); })
c779bea5
YG
3541 /* Otherwise replace with sensible integer constant. */
3542 (with
3543 {
3544 gcc_checking_assert (!overflow_p);
3545 }
3546 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
3547
40fd269a
MG
3548/* Fold A /[ex] B CMP C to A CMP B * C. */
3549(for cmp (eq ne)
3550 (simplify
3551 (cmp (exact_div @0 @1) INTEGER_CST@2)
3552 (if (!integer_zerop (@1))
8e6cdc90 3553 (if (wi::to_wide (@2) == 0)
40fd269a
MG
3554 (cmp @0 @2)
3555 (if (TREE_CODE (@1) == INTEGER_CST)
3556 (with
3557 {
4a669ac3 3558 wi::overflow_type ovf;
8e6cdc90
RS
3559 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3560 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
40fd269a
MG
3561 }
3562 (if (ovf)
3563 { constant_boolean_node (cmp == NE_EXPR, type); }
3564 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3565(for cmp (lt le gt ge)
3566 (simplify
3567 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90 3568 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
40fd269a
MG
3569 (with
3570 {
4a669ac3 3571 wi::overflow_type ovf;
8e6cdc90
RS
3572 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3573 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
40fd269a
MG
3574 }
3575 (if (ovf)
8e6cdc90
RS
3576 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3577 TYPE_SIGN (TREE_TYPE (@2)))
40fd269a
MG
3578 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3579 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
3580
cfdc4f33
MG
3581/* Unordered tests if either argument is a NaN. */
3582(simplify
3583 (bit_ior (unordered @0 @0) (unordered @1 @1))
aea417d7 3584 (if (types_match (@0, @1))
cfdc4f33 3585 (unordered @0 @1)))
257b01ba
MG
3586(simplify
3587 (bit_and (ordered @0 @0) (ordered @1 @1))
3588 (if (types_match (@0, @1))
3589 (ordered @0 @1)))
cfdc4f33
MG
3590(simplify
3591 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3592 @2)
257b01ba
MG
3593(simplify
3594 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3595 @2)
e18c1d66 3596
90c6f26c
RB
3597/* Simple range test simplifications. */
3598/* A < B || A >= B -> true. */
5d30c58d
RB
3599(for test1 (lt le le le ne ge)
3600 test2 (ge gt ge ne eq ne)
90c6f26c
RB
3601 (simplify
3602 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3603 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3604 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3605 { constant_boolean_node (true, type); })))
3606/* A < B && A >= B -> false. */
3607(for test1 (lt lt lt le ne eq)
3608 test2 (ge gt eq gt eq gt)
3609 (simplify
3610 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3611 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3612 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3613 { constant_boolean_node (false, type); })))
3614
9ebc3467
YG
3615/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3616 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3617
3618 Note that comparisons
3619 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3620 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3621 will be canonicalized to above so there's no need to
3622 consider them here.
3623 */
3624
3625(for cmp (le gt)
3626 eqcmp (eq ne)
3627 (simplify
3628 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3629 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3630 (with
3631 {
3632 tree ty = TREE_TYPE (@0);
3633 unsigned prec = TYPE_PRECISION (ty);
3634 wide_int mask = wi::to_wide (@2, prec);
3635 wide_int rhs = wi::to_wide (@3, prec);
3636 signop sgn = TYPE_SIGN (ty);
3637 }
3638 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3639 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3640 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3641 { build_zero_cst (ty); }))))))
3642
534bd33b
MG
3643/* -A CMP -B -> B CMP A. */
3644(for cmp (tcc_comparison)
3645 scmp (swapped_tcc_comparison)
3646 (simplify
3647 (cmp (negate @0) (negate @1))
3648 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3649 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3650 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3651 (scmp @0 @1)))
3652 (simplify
3653 (cmp (negate @0) CONSTANT_CLASS_P@1)
3654 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3655 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3656 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
23f27839 3657 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
534bd33b
MG
3658 (if (tem && !TREE_OVERFLOW (tem))
3659 (scmp @0 { tem; }))))))
3660
b0eb889b
MG
3661/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3662(for op (eq ne)
3663 (simplify
3664 (op (abs @0) zerop@1)
3665 (op @0 @1)))
3666
6358a676
MG
3667/* From fold_sign_changed_comparison and fold_widened_comparison.
3668 FIXME: the lack of symmetry is disturbing. */
79d4f7c6
RB
3669(for cmp (simple_comparison)
3670 (simplify
3671 (cmp (convert@0 @00) (convert?@1 @10))
452ec2a5 3672 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
79d4f7c6
RB
3673 /* Disable this optimization if we're casting a function pointer
3674 type on targets that require function pointer canonicalization. */
3675 && !(targetm.have_canonicalize_funcptr_for_compare ()
400bc526
JDA
3676 && ((POINTER_TYPE_P (TREE_TYPE (@00))
3677 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
3678 || (POINTER_TYPE_P (TREE_TYPE (@10))
3679 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
2fde61e3 3680 && single_use (@0))
79d4f7c6
RB
3681 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3682 && (TREE_CODE (@10) == INTEGER_CST
6358a676 3683 || @1 != @10)
79d4f7c6
RB
3684 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3685 || cmp == NE_EXPR
3686 || cmp == EQ_EXPR)
6358a676 3687 && !POINTER_TYPE_P (TREE_TYPE (@00)))
79d4f7c6
RB
3688 /* ??? The special-casing of INTEGER_CST conversion was in the original
3689 code and here to avoid a spurious overflow flag on the resulting
3690 constant which fold_convert produces. */
3691 (if (TREE_CODE (@1) == INTEGER_CST)
3692 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3693 TREE_OVERFLOW (@1)); })
3694 (cmp @00 (convert @1)))
3695
3696 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3697 /* If possible, express the comparison in the shorter mode. */
3698 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
7fd82d52
PP
3699 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3700 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3701 && TYPE_UNSIGNED (TREE_TYPE (@00))))
79d4f7c6
RB
3702 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3703 || ((TYPE_PRECISION (TREE_TYPE (@00))
3704 >= TYPE_PRECISION (TREE_TYPE (@10)))
3705 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3706 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3707 || (TREE_CODE (@10) == INTEGER_CST
f6c15759 3708 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
3709 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3710 (cmp @00 (convert @10))
3711 (if (TREE_CODE (@10) == INTEGER_CST
f6c15759 3712 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
3713 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3714 (with
3715 {
3716 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3717 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3718 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3719 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3720 }
3721 (if (above || below)
3722 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3723 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3724 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3725 { constant_boolean_node (above ? true : false, type); }
3726 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3727 { constant_boolean_node (above ? false : true, type); }))))))))))))
66e1cacf 3728
96a111a3
RB
3729(for cmp (eq ne)
3730 /* A local variable can never be pointed to by
3731 the default SSA name of an incoming parameter.
3732 SSA names are canonicalized to 2nd place. */
3733 (simplify
3734 (cmp addr@0 SSA_NAME@1)
3735 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3736 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3737 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3738 (if (TREE_CODE (base) == VAR_DECL
3739 && auto_var_in_fn_p (base, current_function_decl))
3740 (if (cmp == NE_EXPR)
3741 { constant_boolean_node (true, type); }
3742 { constant_boolean_node (false, type); }))))))
3743
66e1cacf
RB
3744/* Equality compare simplifications from fold_binary */
3745(for cmp (eq ne)
3746
3747 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3748 Similarly for NE_EXPR. */
3749 (simplify
3750 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3751 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
8e6cdc90 3752 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
66e1cacf
RB
3753 { constant_boolean_node (cmp == NE_EXPR, type); }))
3754
3755 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3756 (simplify
3757 (cmp (bit_xor @0 @1) integer_zerop)
3758 (cmp @0 @1))
3759
3760 /* (X ^ Y) == Y becomes X == 0.
3761 Likewise (X ^ Y) == X becomes Y == 0. */
3762 (simplify
99e943a2 3763 (cmp:c (bit_xor:c @0 @1) @0)
66e1cacf
RB
3764 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3765
3766 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3767 (simplify
3768 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3769 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
d057c866 3770 (cmp @0 (bit_xor @1 (convert @2)))))
d057c866
RB
3771
3772 (simplify
3773 (cmp (convert? addr@0) integer_zerop)
3774 (if (tree_single_nonzero_warnv_p (@0, NULL))
3775 { constant_boolean_node (cmp == NE_EXPR, type); })))
3776
b0eb889b
MG
3777/* If we have (A & C) == C where C is a power of 2, convert this into
3778 (A & C) != 0. Similarly for NE_EXPR. */
3779(for cmp (eq ne)
3780 icmp (ne eq)
3781 (simplify
3782 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3783 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
03cc70b5 3784
519e0faa
PB
3785/* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3786 convert this into a shift followed by ANDing with D. */
3787(simplify
3788 (cond
3789 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
9e61e48e
JJ
3790 INTEGER_CST@2 integer_zerop)
3791 (if (integer_pow2p (@2))
3792 (with {
3793 int shift = (wi::exact_log2 (wi::to_wide (@2))
3794 - wi::exact_log2 (wi::to_wide (@1)));
3795 }
3796 (if (shift > 0)
3797 (bit_and
3798 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3799 (bit_and
3800 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
3801 @2)))))
519e0faa 3802
b0eb889b
MG
3803/* If we have (A & C) != 0 where C is the sign bit of A, convert
3804 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3805(for cmp (eq ne)
3806 ncmp (ge lt)
3807 (simplify
3808 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3809 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2be65d9e 3810 && type_has_mode_precision_p (TREE_TYPE (@0))
b0eb889b 3811 && element_precision (@2) >= element_precision (@0)
8e6cdc90 3812 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
b0eb889b
MG
3813 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3814 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3815
519e0faa 3816/* If we have A < 0 ? C : 0 where C is a power of 2, convert
c0140e3c 3817 this into a right shift or sign extension followed by ANDing with C. */
519e0faa
PB
3818(simplify
3819 (cond
3820 (lt @0 integer_zerop)
9e61e48e
JJ
3821 INTEGER_CST@1 integer_zerop)
3822 (if (integer_pow2p (@1)
3823 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
c0140e3c 3824 (with {
8e6cdc90 3825 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
c0140e3c
JJ
3826 }
3827 (if (shift >= 0)
3828 (bit_and
3829 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3830 @1)
3831 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
3832 sign extension followed by AND with C will achieve the effect. */
3833 (bit_and (convert @0) @1)))))
519e0faa 3834
68aba1f6
RB
3835/* When the addresses are not directly of decls compare base and offset.
3836 This implements some remaining parts of fold_comparison address
3837 comparisons but still no complete part of it. Still it is good
3838 enough to make fold_stmt not regress when not dispatching to fold_binary. */
3839(for cmp (simple_comparison)
3840 (simplify
f501d5cd 3841 (cmp (convert1?@2 addr@0) (convert2? addr@1))
68aba1f6
RB
3842 (with
3843 {
a90c8804 3844 poly_int64 off0, off1;
68aba1f6
RB
3845 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3846 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3847 if (base0 && TREE_CODE (base0) == MEM_REF)
3848 {
aca52e6f 3849 off0 += mem_ref_offset (base0).force_shwi ();
68aba1f6
RB
3850 base0 = TREE_OPERAND (base0, 0);
3851 }
3852 if (base1 && TREE_CODE (base1) == MEM_REF)
3853 {
aca52e6f 3854 off1 += mem_ref_offset (base1).force_shwi ();
68aba1f6
RB
3855 base1 = TREE_OPERAND (base1, 0);
3856 }
3857 }
da571fda
RB
3858 (if (base0 && base1)
3859 (with
3860 {
aad88aed 3861 int equal = 2;
70f40fea
JJ
3862 /* Punt in GENERIC on variables with value expressions;
3863 the value expressions might point to fields/elements
3864 of other vars etc. */
3865 if (GENERIC
3866 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3867 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3868 ;
3869 else if (decl_in_symtab_p (base0)
3870 && decl_in_symtab_p (base1))
da571fda
RB
3871 equal = symtab_node::get_create (base0)
3872 ->equal_address_to (symtab_node::get_create (base1));
c3bea076
RB
3873 else if ((DECL_P (base0)
3874 || TREE_CODE (base0) == SSA_NAME
3875 || TREE_CODE (base0) == STRING_CST)
3876 && (DECL_P (base1)
3877 || TREE_CODE (base1) == SSA_NAME
3878 || TREE_CODE (base1) == STRING_CST))
aad88aed 3879 equal = (base0 == base1);
da571fda 3880 }
3fccbb9e
JJ
3881 (if (equal == 1
3882 && (cmp == EQ_EXPR || cmp == NE_EXPR
3883 /* If the offsets are equal we can ignore overflow. */
3884 || known_eq (off0, off1)
3885 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3886 /* Or if we compare using pointers to decls or strings. */
3887 || (POINTER_TYPE_P (TREE_TYPE (@2))
3888 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
da571fda 3889 (switch
a90c8804
RS
3890 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3891 { constant_boolean_node (known_eq (off0, off1), type); })
3892 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3893 { constant_boolean_node (known_ne (off0, off1), type); })
3894 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
3895 { constant_boolean_node (known_lt (off0, off1), type); })
3896 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
3897 { constant_boolean_node (known_le (off0, off1), type); })
3898 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
3899 { constant_boolean_node (known_ge (off0, off1), type); })
3900 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
3901 { constant_boolean_node (known_gt (off0, off1), type); }))
da571fda
RB
3902 (if (equal == 0
3903 && DECL_P (base0) && DECL_P (base1)
3904 /* If we compare this as integers require equal offset. */
3905 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
a90c8804 3906 || known_eq (off0, off1)))
da571fda
RB
3907 (switch
3908 (if (cmp == EQ_EXPR)
3909 { constant_boolean_node (false, type); })
3910 (if (cmp == NE_EXPR)
3911 { constant_boolean_node (true, type); })))))))))
66e1cacf 3912
98998245
RB
3913/* Simplify pointer equality compares using PTA. */
3914(for neeq (ne eq)
3915 (simplify
3916 (neeq @0 @1)
3917 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3918 && ptrs_compare_unequal (@0, @1))
f913ff2a 3919 { constant_boolean_node (neeq != EQ_EXPR, type); })))
98998245 3920
8f63caf6 3921/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
467719fb
PK
3922 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3923 Disable the transform if either operand is pointer to function.
3924 This broke pr22051-2.c for arm where function pointer
3925 canonicalizaion is not wanted. */
1c0a8806 3926
8f63caf6
RB
3927(for cmp (ne eq)
3928 (simplify
3929 (cmp (convert @0) INTEGER_CST@1)
f53e7e13
JJ
3930 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
3931 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3932 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3933 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3934 && POINTER_TYPE_P (TREE_TYPE (@1))
3935 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
3936 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
8f63caf6
RB
3937 (cmp @0 (convert @1)))))
3938
21aacde4
RB
3939/* Non-equality compare simplifications from fold_binary */
3940(for cmp (lt gt le ge)
3941 /* Comparisons with the highest or lowest possible integer of
3942 the specified precision will have known values. */
3943 (simplify
f06e47d7
JJ
3944 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
3945 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
3946 || POINTER_TYPE_P (TREE_TYPE (@1))
3947 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
21aacde4
RB
3948 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3949 (with
3950 {
f06e47d7
JJ
3951 tree cst = uniform_integer_cst_p (@1);
3952 tree arg1_type = TREE_TYPE (cst);
21aacde4
RB
3953 unsigned int prec = TYPE_PRECISION (arg1_type);
3954 wide_int max = wi::max_value (arg1_type);
3955 wide_int signed_max = wi::max_value (prec, SIGNED);
3956 wide_int min = wi::min_value (arg1_type);
3957 }
3958 (switch
f06e47d7 3959 (if (wi::to_wide (cst) == max)
21aacde4
RB
3960 (switch
3961 (if (cmp == GT_EXPR)
3962 { constant_boolean_node (false, type); })
3963 (if (cmp == GE_EXPR)
3964 (eq @2 @1))
3965 (if (cmp == LE_EXPR)
3966 { constant_boolean_node (true, type); })
3967 (if (cmp == LT_EXPR)
3968 (ne @2 @1))))
f06e47d7 3969 (if (wi::to_wide (cst) == min)
21aacde4
RB
3970 (switch
3971 (if (cmp == LT_EXPR)
3972 { constant_boolean_node (false, type); })
3973 (if (cmp == LE_EXPR)
3974 (eq @2 @1))
3975 (if (cmp == GE_EXPR)
3976 { constant_boolean_node (true, type); })
3977 (if (cmp == GT_EXPR)
3978 (ne @2 @1))))
f06e47d7 3979 (if (wi::to_wide (cst) == max - 1)
9bc22d19
RB
3980 (switch
3981 (if (cmp == GT_EXPR)
f06e47d7
JJ
3982 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
3983 wide_int_to_tree (TREE_TYPE (cst),
3984 wi::to_wide (cst)
3985 + 1)); }))
9bc22d19 3986 (if (cmp == LE_EXPR)
f06e47d7
JJ
3987 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
3988 wide_int_to_tree (TREE_TYPE (cst),
3989 wi::to_wide (cst)
3990 + 1)); }))))
3991 (if (wi::to_wide (cst) == min + 1)
21aacde4
RB
3992 (switch
3993 (if (cmp == GE_EXPR)
f06e47d7
JJ
3994 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
3995 wide_int_to_tree (TREE_TYPE (cst),
3996 wi::to_wide (cst)
3997 - 1)); }))
21aacde4 3998 (if (cmp == LT_EXPR)
f06e47d7
JJ
3999 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4000 wide_int_to_tree (TREE_TYPE (cst),
4001 wi::to_wide (cst)
4002 - 1)); }))))
4003 (if (wi::to_wide (cst) == signed_max
21aacde4
RB
4004 && TYPE_UNSIGNED (arg1_type)
4005 /* We will flip the signedness of the comparison operator
4006 associated with the mode of @1, so the sign bit is
4007 specified by this mode. Check that @1 is the signed
4008 max associated with this sign bit. */
7a504f33 4009 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
21aacde4
RB
4010 /* signed_type does not work on pointer types. */
4011 && INTEGRAL_TYPE_P (arg1_type))
4012 /* The following case also applies to X < signed_max+1
4013 and X >= signed_max+1 because previous transformations. */
4014 (if (cmp == LE_EXPR || cmp == GT_EXPR)
f06e47d7
JJ
4015 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
4016 (switch
4017 (if (cst == @1 && cmp == LE_EXPR)
4018 (ge (convert:st @0) { build_zero_cst (st); }))
4019 (if (cst == @1 && cmp == GT_EXPR)
4020 (lt (convert:st @0) { build_zero_cst (st); }))
4021 (if (cmp == LE_EXPR)
4022 (ge (view_convert:st @0) { build_zero_cst (st); }))
4023 (if (cmp == GT_EXPR)
4024 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
03cc70b5 4025
b5d3d787
RB
4026(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
4027 /* If the second operand is NaN, the result is constant. */
4028 (simplify
4029 (cmp @0 REAL_CST@1)
4030 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4031 && (cmp != LTGT_EXPR || ! flag_trapping_math))
50301115 4032 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
b5d3d787 4033 ? false : true, type); })))
21aacde4 4034
55cf3946
RB
4035/* bool_var != 0 becomes bool_var. */
4036(simplify
b5d3d787 4037 (ne @0 integer_zerop)
55cf3946
RB
4038 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4039 && types_match (type, TREE_TYPE (@0)))
4040 (non_lvalue @0)))
4041/* bool_var == 1 becomes bool_var. */
4042(simplify
b5d3d787 4043 (eq @0 integer_onep)
55cf3946
RB
4044 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4045 && types_match (type, TREE_TYPE (@0)))
4046 (non_lvalue @0)))
b5d3d787
RB
4047/* Do not handle
4048 bool_var == 0 becomes !bool_var or
4049 bool_var != 1 becomes !bool_var
4050 here because that only is good in assignment context as long
4051 as we require a tcc_comparison in GIMPLE_CONDs where we'd
4052 replace if (x == 0) with tem = ~x; if (tem != 0) which is
4053 clearly less optimal and which we'll transform again in forwprop. */
55cf3946 4054
ca1206be
MG
4055/* When one argument is a constant, overflow detection can be simplified.
4056 Currently restricted to single use so as not to interfere too much with
4057 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4058 A + CST CMP A -> A CMP' CST' */
4059(for cmp (lt le ge gt)
4060 out (gt gt le le)
4061 (simplify
a8e9f9a3 4062 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
ca1206be
MG
4063 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4064 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
8e6cdc90 4065 && wi::to_wide (@1) != 0
ca1206be 4066 && single_use (@2))
8e6cdc90
RS
4067 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4068 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4069 wi::max_value (prec, UNSIGNED)
4070 - wi::to_wide (@1)); })))))
ca1206be 4071
3563f78f
MG
4072/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4073 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4074 expects the long form, so we restrict the transformation for now. */
4075(for cmp (gt le)
4076 (simplify
a8e9f9a3 4077 (cmp:c (minus@2 @0 @1) @0)
3563f78f
MG
4078 (if (single_use (@2)
4079 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4080 && TYPE_UNSIGNED (TREE_TYPE (@0))
4081 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4082 (cmp @1 @0))))
3563f78f
MG
4083
4084/* Testing for overflow is unnecessary if we already know the result. */
3563f78f
MG
4085/* A - B > A */
4086(for cmp (gt le)
4087 out (ne eq)
4088 (simplify
a8e9f9a3 4089 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3563f78f
MG
4090 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4091 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4092 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4093/* A + B < A */
4094(for cmp (lt ge)
4095 out (ne eq)
4096 (simplify
a8e9f9a3 4097 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3563f78f
MG
4098 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4099 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4100 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4101
603aeb87 4102/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
0557293f 4103 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
0557293f
AM
4104(for cmp (lt ge)
4105 out (ne eq)
4106 (simplify
603aeb87 4107 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
0557293f
AM
4108 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4109 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4110 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
55cf3946 4111
53f3cd25
RS
4112/* Simplification of math builtins. These rules must all be optimizations
4113 as well as IL simplifications. If there is a possibility that the new
4114 form could be a pessimization, the rule should go in the canonicalization
4115 section that follows this one.
e18c1d66 4116
53f3cd25
RS
4117 Rules can generally go in this section if they satisfy one of
4118 the following:
4119
4120 - the rule describes an identity
4121
4122 - the rule replaces calls with something as simple as addition or
4123 multiplication
4124
4125 - the rule contains unary calls only and simplifies the surrounding
4126 arithmetic. (The idea here is to exclude non-unary calls in which
4127 one operand is constant and in which the call is known to be cheap
4128 when the operand has that value.) */
52c6378a 4129
53f3cd25 4130(if (flag_unsafe_math_optimizations)
52c6378a
N
4131 /* Simplify sqrt(x) * sqrt(x) -> x. */
4132 (simplify
c6cfa2bf 4133 (mult (SQRT_ALL@1 @0) @1)
52c6378a
N
4134 (if (!HONOR_SNANS (type))
4135 @0))
4136
ed17cb57
JW
4137 (for op (plus minus)
4138 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
4139 (simplify
4140 (op (rdiv @0 @1)
4141 (rdiv @2 @1))
4142 (rdiv (op @0 @2) @1)))
4143
5e21d765
WD
4144 (for cmp (lt le gt ge)
4145 neg_cmp (gt ge lt le)
4146 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
4147 (simplify
4148 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
4149 (with
4150 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
4151 (if (tem
4152 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
4153 || (real_zerop (tem) && !real_zerop (@1))))
4154 (switch
4155 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
4156 (cmp @0 { tem; }))
4157 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
4158 (neg_cmp @0 { tem; })))))))
4159
35401640
N
4160 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
4161 (for root (SQRT CBRT)
4162 (simplify
4163 (mult (root:s @0) (root:s @1))
4164 (root (mult @0 @1))))
4165
35401640
N
4166 /* Simplify expN(x) * expN(y) -> expN(x+y). */
4167 (for exps (EXP EXP2 EXP10 POW10)
4168 (simplify
4169 (mult (exps:s @0) (exps:s @1))
4170 (exps (plus @0 @1))))
4171
52c6378a 4172 /* Simplify a/root(b/c) into a*root(c/b). */
35401640
N
4173 (for root (SQRT CBRT)
4174 (simplify
4175 (rdiv @0 (root:s (rdiv:s @1 @2)))
4176 (mult @0 (root (rdiv @2 @1)))))
4177
4178 /* Simplify x/expN(y) into x*expN(-y). */
4179 (for exps (EXP EXP2 EXP10 POW10)
4180 (simplify
4181 (rdiv @0 (exps:s @1))
4182 (mult @0 (exps (negate @1)))))
52c6378a 4183
eee7b6c4
RB
4184 (for logs (LOG LOG2 LOG10 LOG10)
4185 exps (EXP EXP2 EXP10 POW10)
8acda9b2 4186 /* logN(expN(x)) -> x. */
e18c1d66
RB
4187 (simplify
4188 (logs (exps @0))
8acda9b2
RS
4189 @0)
4190 /* expN(logN(x)) -> x. */
4191 (simplify
4192 (exps (logs @0))
4193 @0))
53f3cd25 4194
e18c1d66
RB
4195 /* Optimize logN(func()) for various exponential functions. We
4196 want to determine the value "x" and the power "exponent" in
4197 order to transform logN(x**exponent) into exponent*logN(x). */
eee7b6c4
RB
4198 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
4199 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
e18c1d66
RB
4200 (simplify
4201 (logs (exps @0))
c9e926ce
RS
4202 (if (SCALAR_FLOAT_TYPE_P (type))
4203 (with {
4204 tree x;
4205 switch (exps)
4206 {
4207 CASE_CFN_EXP:
4208 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
4209 x = build_real_truncate (type, dconst_e ());
4210 break;
4211 CASE_CFN_EXP2:
4212 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
4213 x = build_real (type, dconst2);
4214 break;
4215 CASE_CFN_EXP10:
4216 CASE_CFN_POW10:
4217 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
4218 {
4219 REAL_VALUE_TYPE dconst10;
4220 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4221 x = build_real (type, dconst10);
4222 }
4223 break;
4224 default:
4225 gcc_unreachable ();
4226 }
4227 }
4228 (mult (logs { x; }) @0)))))
53f3cd25 4229
e18c1d66
RB
4230 (for logs (LOG LOG
4231 LOG2 LOG2
4232 LOG10 LOG10)
4233 exps (SQRT CBRT)
4234 (simplify
4235 (logs (exps @0))
c9e926ce
RS
4236 (if (SCALAR_FLOAT_TYPE_P (type))
4237 (with {
4238 tree x;
4239 switch (exps)
4240 {
4241 CASE_CFN_SQRT:
4242 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
4243 x = build_real (type, dconsthalf);
4244 break;
4245 CASE_CFN_CBRT:
4246 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
4247 x = build_real_truncate (type, dconst_third ());
4248 break;
4249 default:
4250 gcc_unreachable ();
4251 }
4252 }
4253 (mult { x; } (logs @0))))))
53f3cd25
RS
4254
4255 /* logN(pow(x,exponent)) -> exponent*logN(x). */
e18c1d66
RB
4256 (for logs (LOG LOG2 LOG10)
4257 pows (POW)
4258 (simplify
4259 (logs (pows @0 @1))
53f3cd25
RS
4260 (mult @1 (logs @0))))
4261
848bb6fc
JJ
4262 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4263 or if C is a positive power of 2,
4264 pow(C,x) -> exp2(log2(C)*x). */
30a2c10e 4265#if GIMPLE
e83fe013
WD
4266 (for pows (POW)
4267 exps (EXP)
4268 logs (LOG)
848bb6fc
JJ
4269 exp2s (EXP2)
4270 log2s (LOG2)
e83fe013
WD
4271 (simplify
4272 (pows REAL_CST@0 @1)
848bb6fc 4273 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
ef7866a3
JJ
4274 && real_isfinite (TREE_REAL_CST_PTR (@0))
4275 /* As libmvec doesn't have a vectorized exp2, defer optimizing
4276 the use_exp2 case until after vectorization. It seems actually
4277 beneficial for all constants to postpone this until later,
4278 because exp(log(C)*x), while faster, will have worse precision
4279 and if x folds into a constant too, that is unnecessary
4280 pessimization. */
4281 && canonicalize_math_after_vectorization_p ())
848bb6fc
JJ
4282 (with {
4283 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4284 bool use_exp2 = false;
4285 if (targetm.libc_has_function (function_c99_misc)
4286 && value->cl == rvc_normal)
4287 {
4288 REAL_VALUE_TYPE frac_rvt = *value;
4289 SET_REAL_EXP (&frac_rvt, 1);
4290 if (real_equal (&frac_rvt, &dconst1))
4291 use_exp2 = true;
4292 }
4293 }
4294 (if (!use_exp2)
30a2c10e
JJ
4295 (if (optimize_pow_to_exp (@0, @1))
4296 (exps (mult (logs @0) @1)))
ef7866a3 4297 (exp2s (mult (log2s @0) @1)))))))
30a2c10e 4298#endif
e83fe013 4299
16ef0a8c
JJ
4300 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
4301 (for pows (POW)
4302 exps (EXP EXP2 EXP10 POW10)
4303 logs (LOG LOG2 LOG10 LOG10)
4304 (simplify
4305 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4306 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4307 && real_isfinite (TREE_REAL_CST_PTR (@0)))
4308 (exps (plus (mult (logs @0) @1) @2)))))
4309
53f3cd25
RS
4310 (for sqrts (SQRT)
4311 cbrts (CBRT)
b4838d77 4312 pows (POW)
53f3cd25
RS
4313 exps (EXP EXP2 EXP10 POW10)
4314 /* sqrt(expN(x)) -> expN(x*0.5). */
4315 (simplify
4316 (sqrts (exps @0))
4317 (exps (mult @0 { build_real (type, dconsthalf); })))
4318 /* cbrt(expN(x)) -> expN(x/3). */
4319 (simplify
4320 (cbrts (exps @0))
b4838d77
RS
4321 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4322 /* pow(expN(x), y) -> expN(x*y). */
4323 (simplify
4324 (pows (exps @0) @1)
4325 (exps (mult @0 @1))))
cfed37a0
RS
4326
4327 /* tan(atan(x)) -> x. */
4328 (for tans (TAN)
4329 atans (ATAN)
4330 (simplify
4331 (tans (atans @0))
4332 @0)))
53f3cd25 4333
121ef08b
GB
4334 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
4335 (for sins (SIN)
4336 atans (ATAN)
4337 sqrts (SQRT)
4338 copysigns (COPYSIGN)
4339 (simplify
4340 (sins (atans:s @0))
4341 (with
4342 {
4343 REAL_VALUE_TYPE r_cst;
4344 build_sinatan_real (&r_cst, type);
4345 tree t_cst = build_real (type, r_cst);
4346 tree t_one = build_one_cst (type);
4347 }
4348 (if (SCALAR_FLOAT_TYPE_P (type))
4349 (cond (le (abs @0) { t_cst; })
4350 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
4351 (copysigns { t_one; } @0))))))
4352
4353/* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
4354 (for coss (COS)
4355 atans (ATAN)
4356 sqrts (SQRT)
4357 copysigns (COPYSIGN)
4358 (simplify
4359 (coss (atans:s @0))
4360 (with
4361 {
4362 REAL_VALUE_TYPE r_cst;
4363 build_sinatan_real (&r_cst, type);
4364 tree t_cst = build_real (type, r_cst);
4365 tree t_one = build_one_cst (type);
4366 tree t_zero = build_zero_cst (type);
4367 }
4368 (if (SCALAR_FLOAT_TYPE_P (type))
4369 (cond (le (abs @0) { t_cst; })
4370 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
4371 (copysigns { t_zero; } @0))))))
4372
abcc43f5
RS
4373/* cabs(x+0i) or cabs(0+xi) -> abs(x). */
4374(simplify
e04d2a35 4375 (CABS (complex:C @0 real_zerop@1))
abcc43f5
RS
4376 (abs @0))
4377
67dbe582 4378/* trunc(trunc(x)) -> trunc(x), etc. */
c6cfa2bf 4379(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
67dbe582
RS
4380 (simplify
4381 (fns (fns @0))
4382 (fns @0)))
4383/* f(x) -> x if x is integer valued and f does nothing for such values. */
c6cfa2bf 4384(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
67dbe582
RS
4385 (simplify
4386 (fns integer_valued_real_p@0)
4387 @0))
67dbe582 4388
4d7836c4
RS
4389/* hypot(x,0) and hypot(0,x) -> abs(x). */
4390(simplify
c9e926ce 4391 (HYPOT:c @0 real_zerop@1)
4d7836c4
RS
4392 (abs @0))
4393
b4838d77
RS
4394/* pow(1,x) -> 1. */
4395(simplify
4396 (POW real_onep@0 @1)
4397 @0)
4398
461e4145
RS
4399(simplify
4400 /* copysign(x,x) -> x. */
c6cfa2bf 4401 (COPYSIGN_ALL @0 @0)
461e4145
RS
4402 @0)
4403
4404(simplify
4405 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
c6cfa2bf 4406 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
461e4145
RS
4407 (abs @0))
4408
86c0733f
RS
4409(for scale (LDEXP SCALBN SCALBLN)
4410 /* ldexp(0, x) -> 0. */
4411 (simplify
4412 (scale real_zerop@0 @1)
4413 @0)
4414 /* ldexp(x, 0) -> x. */
4415 (simplify
4416 (scale @0 integer_zerop@1)
4417 @0)
4418 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
4419 (simplify
4420 (scale REAL_CST@0 @1)
4421 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
4422 @0)))
4423
53f3cd25
RS
4424/* Canonicalization of sequences of math builtins. These rules represent
4425 IL simplifications but are not necessarily optimizations.
4426
4427 The sincos pass is responsible for picking "optimal" implementations
4428 of math builtins, which may be more complicated and can sometimes go
4429 the other way, e.g. converting pow into a sequence of sqrts.
4430 We only want to do these canonicalizations before the pass has run. */
4431
4432(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4433 /* Simplify tan(x) * cos(x) -> sin(x). */
4434 (simplify
4435 (mult:c (TAN:s @0) (COS:s @0))
4436 (SIN @0))
4437
4438 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4439 (simplify
de3fbea3 4440 (mult:c @0 (POW:s @0 REAL_CST@1))
53f3cd25
RS
4441 (if (!TREE_OVERFLOW (@1))
4442 (POW @0 (plus @1 { build_one_cst (type); }))))
4443
4444 /* Simplify sin(x) / cos(x) -> tan(x). */
4445 (simplify
4446 (rdiv (SIN:s @0) (COS:s @0))
4447 (TAN @0))
4448
4449 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4450 (simplify
4451 (rdiv (COS:s @0) (SIN:s @0))
4452 (rdiv { build_one_cst (type); } (TAN @0)))
4453
4454 /* Simplify sin(x) / tan(x) -> cos(x). */
4455 (simplify
4456 (rdiv (SIN:s @0) (TAN:s @0))
4457 (if (! HONOR_NANS (@0)
4458 && ! HONOR_INFINITIES (@0))
c9e926ce 4459 (COS @0)))
53f3cd25
RS
4460
4461 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4462 (simplify
4463 (rdiv (TAN:s @0) (SIN:s @0))
4464 (if (! HONOR_NANS (@0)
4465 && ! HONOR_INFINITIES (@0))
4466 (rdiv { build_one_cst (type); } (COS @0))))
4467
4468 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4469 (simplify
4470 (mult (POW:s @0 @1) (POW:s @0 @2))
4471 (POW @0 (plus @1 @2)))
4472
4473 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4474 (simplify
4475 (mult (POW:s @0 @1) (POW:s @2 @1))
4476 (POW (mult @0 @2) @1))
4477
de3fbea3
RB
4478 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4479 (simplify
4480 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4481 (POWI (mult @0 @2) @1))
4482
53f3cd25
RS
4483 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4484 (simplify
4485 (rdiv (POW:s @0 REAL_CST@1) @0)
4486 (if (!TREE_OVERFLOW (@1))
4487 (POW @0 (minus @1 { build_one_cst (type); }))))
4488
4489 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4490 (simplify
4491 (rdiv @0 (POW:s @1 @2))
4492 (mult @0 (POW @1 (negate @2))))
4493
4494 (for sqrts (SQRT)
4495 cbrts (CBRT)
4496 pows (POW)
4497 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4498 (simplify
4499 (sqrts (sqrts @0))
4500 (pows @0 { build_real (type, dconst_quarter ()); }))
4501 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4502 (simplify
4503 (sqrts (cbrts @0))
4504 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4505 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4506 (simplify
4507 (cbrts (sqrts @0))
4508 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4509 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4510 (simplify
4511 (cbrts (cbrts tree_expr_nonnegative_p@0))
4512 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4513 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4514 (simplify
4515 (sqrts (pows @0 @1))
4516 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4517 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4518 (simplify
4519 (cbrts (pows tree_expr_nonnegative_p@0 @1))
b4838d77
RS
4520 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4521 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4522 (simplify
4523 (pows (sqrts @0) @1)
4524 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4525 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4526 (simplify
4527 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4528 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4529 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4530 (simplify
4531 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4532 (pows @0 (mult @1 @2))))
abcc43f5
RS
4533
4534 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4535 (simplify
4536 (CABS (complex @0 @0))
96285749
RS
4537 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4538
4d7836c4
RS
4539 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4540 (simplify
4541 (HYPOT @0 @0)
4542 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4543
96285749
RS
4544 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4545 (for cexps (CEXP)
4546 exps (EXP)
4547 cexpis (CEXPI)
4548 (simplify
4549 (cexps compositional_complex@0)
4550 (if (targetm.libc_has_function (function_c99_math_complex))
4551 (complex
4552 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4553 (mult @1 (imagpart @2)))))))
e18c1d66 4554
67dbe582
RS
4555(if (canonicalize_math_p ())
4556 /* floor(x) -> trunc(x) if x is nonnegative. */
c6cfa2bf
MM
4557 (for floors (FLOOR_ALL)
4558 truncs (TRUNC_ALL)
67dbe582
RS
4559 (simplify
4560 (floors tree_expr_nonnegative_p@0)
4561 (truncs @0))))
4562
4563(match double_value_p
4564 @0
4565 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4566(for froms (BUILT_IN_TRUNCL
4567 BUILT_IN_FLOORL
4568 BUILT_IN_CEILL
4569 BUILT_IN_ROUNDL
4570 BUILT_IN_NEARBYINTL
4571 BUILT_IN_RINTL)
4572 tos (BUILT_IN_TRUNC
4573 BUILT_IN_FLOOR
4574 BUILT_IN_CEIL
4575 BUILT_IN_ROUND
4576 BUILT_IN_NEARBYINT
4577 BUILT_IN_RINT)
4578 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4579 (if (optimize && canonicalize_math_p ())
4580 (simplify
4581 (froms (convert double_value_p@0))
4582 (convert (tos @0)))))
4583
4584(match float_value_p
4585 @0
4586 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4587(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4588 BUILT_IN_FLOORL BUILT_IN_FLOOR
4589 BUILT_IN_CEILL BUILT_IN_CEIL
4590 BUILT_IN_ROUNDL BUILT_IN_ROUND
4591 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4592 BUILT_IN_RINTL BUILT_IN_RINT)
4593 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4594 BUILT_IN_FLOORF BUILT_IN_FLOORF
4595 BUILT_IN_CEILF BUILT_IN_CEILF
4596 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4597 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4598 BUILT_IN_RINTF BUILT_IN_RINTF)
4599 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
4600 if x is a float. */
5dac7dbd
JDA
4601 (if (optimize && canonicalize_math_p ()
4602 && targetm.libc_has_function (function_c99_misc))
67dbe582
RS
4603 (simplify
4604 (froms (convert float_value_p@0))
4605 (convert (tos @0)))))
4606
543a9bcd
RS
4607(for froms (XFLOORL XCEILL XROUNDL XRINTL)
4608 tos (XFLOOR XCEIL XROUND XRINT)
4609 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4610 (if (optimize && canonicalize_math_p ())
4611 (simplify
4612 (froms (convert double_value_p@0))
4613 (tos @0))))
4614
4615(for froms (XFLOORL XCEILL XROUNDL XRINTL
4616 XFLOOR XCEIL XROUND XRINT)
4617 tos (XFLOORF XCEILF XROUNDF XRINTF)
4618 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
4619 if x is a float. */
4620 (if (optimize && canonicalize_math_p ())
4621 (simplify
4622 (froms (convert float_value_p@0))
4623 (tos @0))))
4624
4625(if (canonicalize_math_p ())
4626 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4627 (for floors (IFLOOR LFLOOR LLFLOOR)
4628 (simplify
4629 (floors tree_expr_nonnegative_p@0)
4630 (fix_trunc @0))))
4631
4632(if (canonicalize_math_p ())
4633 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4634 (for fns (IFLOOR LFLOOR LLFLOOR
4635 ICEIL LCEIL LLCEIL
4636 IROUND LROUND LLROUND)
4637 (simplify
4638 (fns integer_valued_real_p@0)
4639 (fix_trunc @0)))
4640 (if (!flag_errno_math)
4641 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4642 (for rints (IRINT LRINT LLRINT)
4643 (simplify
4644 (rints integer_valued_real_p@0)
4645 (fix_trunc @0)))))
4646
4647(if (canonicalize_math_p ())
4648 (for ifn (IFLOOR ICEIL IROUND IRINT)
4649 lfn (LFLOOR LCEIL LROUND LRINT)
4650 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4651 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4652 sizeof (int) == sizeof (long). */
4653 (if (TYPE_PRECISION (integer_type_node)
4654 == TYPE_PRECISION (long_integer_type_node))
4655 (simplify
4656 (ifn @0)
4657 (lfn:long_integer_type_node @0)))
4658 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4659 sizeof (long long) == sizeof (long). */
4660 (if (TYPE_PRECISION (long_long_integer_type_node)
4661 == TYPE_PRECISION (long_integer_type_node))
4662 (simplify
4663 (llfn @0)
4664 (lfn:long_integer_type_node @0)))))
4665
92c52eab
RS
4666/* cproj(x) -> x if we're ignoring infinities. */
4667(simplify
4668 (CPROJ @0)
4669 (if (!HONOR_INFINITIES (type))
4670 @0))
4671
4534c203
RB
4672/* If the real part is inf and the imag part is known to be
4673 nonnegative, return (inf + 0i). */
4674(simplify
4675 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
4676 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
92c52eab
RS
4677 { build_complex_inf (type, false); }))
4678
4534c203
RB
4679/* If the imag part is inf, return (inf+I*copysign(0,imag)). */
4680(simplify
4681 (CPROJ (complex @0 REAL_CST@1))
4682 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
92c52eab 4683 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4534c203 4684
b4838d77
RS
4685(for pows (POW)
4686 sqrts (SQRT)
4687 cbrts (CBRT)
4688 (simplify
4689 (pows @0 REAL_CST@1)
4690 (with {
4691 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
4692 REAL_VALUE_TYPE tmp;
4693 }
4694 (switch
4695 /* pow(x,0) -> 1. */
4696 (if (real_equal (value, &dconst0))
4697 { build_real (type, dconst1); })
4698 /* pow(x,1) -> x. */
4699 (if (real_equal (value, &dconst1))
4700 @0)
4701 /* pow(x,-1) -> 1/x. */
4702 (if (real_equal (value, &dconstm1))
4703 (rdiv { build_real (type, dconst1); } @0))
4704 /* pow(x,0.5) -> sqrt(x). */
4705 (if (flag_unsafe_math_optimizations
4706 && canonicalize_math_p ()
4707 && real_equal (value, &dconsthalf))
4708 (sqrts @0))
4709 /* pow(x,1/3) -> cbrt(x). */
4710 (if (flag_unsafe_math_optimizations
4711 && canonicalize_math_p ()
4712 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4713 real_equal (value, &tmp)))
4714 (cbrts @0))))))
4534c203 4715
5ddc84ca
RS
4716/* powi(1,x) -> 1. */
4717(simplify
4718 (POWI real_onep@0 @1)
4719 @0)
4720
4721(simplify
4722 (POWI @0 INTEGER_CST@1)
4723 (switch
4724 /* powi(x,0) -> 1. */
8e6cdc90 4725 (if (wi::to_wide (@1) == 0)
5ddc84ca
RS
4726 { build_real (type, dconst1); })
4727 /* powi(x,1) -> x. */
8e6cdc90 4728 (if (wi::to_wide (@1) == 1)
5ddc84ca
RS
4729 @0)
4730 /* powi(x,-1) -> 1/x. */
8e6cdc90 4731 (if (wi::to_wide (@1) == -1)
5ddc84ca
RS
4732 (rdiv { build_real (type, dconst1); } @0))))
4733
03cc70b5 4734/* Narrowing of arithmetic and logical operations.
be144838
JL
4735
4736 These are conceptually similar to the transformations performed for
4737 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4738 term we want to move all that code out of the front-ends into here. */
4739
4740/* If we have a narrowing conversion of an arithmetic operation where
4741 both operands are widening conversions from the same type as the outer
4742 narrowing conversion. Then convert the innermost operands to a suitable
9c582551 4743 unsigned type (to avoid introducing undefined behavior), perform the
be144838
JL
4744 operation and convert the result to the desired type. */
4745(for op (plus minus)
4746 (simplify
93f90bec 4747 (convert (op:s (convert@2 @0) (convert?@3 @1)))
be144838
JL
4748 (if (INTEGRAL_TYPE_P (type)
4749 /* We check for type compatibility between @0 and @1 below,
4750 so there's no need to check that @1/@3 are integral types. */
4751 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4752 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4753 /* The precision of the type of each operand must match the
4754 precision of the mode of each operand, similarly for the
4755 result. */
2be65d9e
RS
4756 && type_has_mode_precision_p (TREE_TYPE (@0))
4757 && type_has_mode_precision_p (TREE_TYPE (@1))
4758 && type_has_mode_precision_p (type)
be144838
JL
4759 /* The inner conversion must be a widening conversion. */
4760 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
93f90bec
BC
4761 && types_match (@0, type)
4762 && (types_match (@0, @1)
4763 /* Or the second operand is const integer or converted const
4764 integer from valueize. */
4765 || TREE_CODE (@1) == INTEGER_CST))
be144838 4766 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
93f90bec 4767 (op @0 (convert @1))
8fdc6c67 4768 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
93f90bec
BC
4769 (convert (op (convert:utype @0)
4770 (convert:utype @1))))))))
48451e8f
JL
4771
4772/* This is another case of narrowing, specifically when there's an outer
4773 BIT_AND_EXPR which masks off bits outside the type of the innermost
4774 operands. Like the previous case we have to convert the operands
9c582551 4775 to unsigned types to avoid introducing undefined behavior for the
48451e8f
JL
4776 arithmetic operation. */
4777(for op (minus plus)
8fdc6c67
RB
4778 (simplify
4779 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4780 (if (INTEGRAL_TYPE_P (type)
4781 /* We check for type compatibility between @0 and @1 below,
4782 so there's no need to check that @1/@3 are integral types. */
4783 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4784 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4785 /* The precision of the type of each operand must match the
4786 precision of the mode of each operand, similarly for the
4787 result. */
2be65d9e
RS
4788 && type_has_mode_precision_p (TREE_TYPE (@0))
4789 && type_has_mode_precision_p (TREE_TYPE (@1))
4790 && type_has_mode_precision_p (type)
8fdc6c67
RB
4791 /* The inner conversion must be a widening conversion. */
4792 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4793 && types_match (@0, @1)
4794 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4795 <= TYPE_PRECISION (TREE_TYPE (@0)))
8e6cdc90
RS
4796 && (wi::to_wide (@4)
4797 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4798 true, TYPE_PRECISION (type))) == 0)
8fdc6c67
RB
4799 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4800 (with { tree ntype = TREE_TYPE (@0); }
4801 (convert (bit_and (op @0 @1) (convert:ntype @4))))
4802 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4803 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
4804 (convert:utype @4))))))))
4f7a5692 4805
03cc70b5 4806/* Transform (@0 < @1 and @0 < @2) to use min,
4f7a5692 4807 (@0 > @1 and @0 > @2) to use max */
dac920e8
MG
4808(for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
4809 op (lt le gt ge lt le gt ge )
4810 ext (min min max max max max min min )
4f7a5692 4811 (simplify
dac920e8 4812 (logic (op:cs @0 @1) (op:cs @0 @2))
4618c453
RB
4813 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4814 && TREE_CODE (@0) != INTEGER_CST)
4f7a5692
MC
4815 (op @0 (ext @1 @2)))))
4816
7317ef4a
RS
4817(simplify
4818 /* signbit(x) -> 0 if x is nonnegative. */
4819 (SIGNBIT tree_expr_nonnegative_p@0)
4820 { integer_zero_node; })
4821
4822(simplify
4823 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
4824 (SIGNBIT @0)
4825 (if (!HONOR_SIGNED_ZEROS (@0))
4826 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
a8b85ce9
MG
4827
4828/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
4829(for cmp (eq ne)
4830 (for op (plus minus)
4831 rop (minus plus)
4832 (simplify
4833 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4834 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4835 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
4836 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
4837 && !TYPE_SATURATING (TREE_TYPE (@0)))
4838 (with { tree res = int_const_binop (rop, @2, @1); }
75473a91
RB
4839 (if (TREE_OVERFLOW (res)
4840 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
a8b85ce9
MG
4841 { constant_boolean_node (cmp == NE_EXPR, type); }
4842 (if (single_use (@3))
11c1e63c
JJ
4843 (cmp @0 { TREE_OVERFLOW (res)
4844 ? drop_tree_overflow (res) : res; }))))))))
a8b85ce9
MG
4845(for cmp (lt le gt ge)
4846 (for op (plus minus)
4847 rop (minus plus)
4848 (simplify
4849 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4850 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4851 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4852 (with { tree res = int_const_binop (rop, @2, @1); }
4853 (if (TREE_OVERFLOW (res))
4854 {
4855 fold_overflow_warning (("assuming signed overflow does not occur "
4856 "when simplifying conditional to constant"),
4857 WARN_STRICT_OVERFLOW_CONDITIONAL);
4858 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
4859 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
8e6cdc90
RS
4860 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
4861 TYPE_SIGN (TREE_TYPE (@1)))
a8b85ce9
MG
4862 != (op == MINUS_EXPR);
4863 constant_boolean_node (less == ovf_high, type);
4864 }
4865 (if (single_use (@3))
4866 (with
4867 {
4868 fold_overflow_warning (("assuming signed overflow does not occur "
4869 "when changing X +- C1 cmp C2 to "
4870 "X cmp C2 -+ C1"),
4871 WARN_STRICT_OVERFLOW_COMPARISON);
4872 }
4873 (cmp @0 { res; })))))))))
d3e40b76
RB
4874
4875/* Canonicalizations of BIT_FIELD_REFs. */
4876
6ec96dcb
RB
4877(simplify
4878 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
4879 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
4880
4881(simplify
4882 (BIT_FIELD_REF (view_convert @0) @1 @2)
4883 (BIT_FIELD_REF @0 @1 @2))
4884
4885(simplify
4886 (BIT_FIELD_REF @0 @1 integer_zerop)
4887 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
4888 (view_convert @0)))
4889
d3e40b76
RB
4890(simplify
4891 (BIT_FIELD_REF @0 @1 @2)
4892 (switch
4893 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
4894 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4895 (switch
4896 (if (integer_zerop (@2))
4897 (view_convert (realpart @0)))
4898 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4899 (view_convert (imagpart @0)))))
4900 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4901 && INTEGRAL_TYPE_P (type)
171f6f05
RB
4902 /* On GIMPLE this should only apply to register arguments. */
4903 && (! GIMPLE || is_gimple_reg (@0))
d3e40b76
RB
4904 /* A bit-field-ref that referenced the full argument can be stripped. */
4905 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
4906 && integer_zerop (@2))
4907 /* Low-parts can be reduced to integral conversions.
4908 ??? The following doesn't work for PDP endian. */
4909 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
4910 /* Don't even think about BITS_BIG_ENDIAN. */
4911 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
4912 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
4913 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
4914 ? (TYPE_PRECISION (TREE_TYPE (@0))
4915 - TYPE_PRECISION (type))
4916 : 0)) == 0)))
4917 (convert @0))))
4918
4919/* Simplify vector extracts. */
4920
4921(simplify
4922 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
4923 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
4924 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
4925 || (VECTOR_TYPE_P (type)
4926 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
4927 (with
4928 {
4929 tree ctor = (TREE_CODE (@0) == SSA_NAME
4930 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
4931 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
4932 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
4933 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
4934 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
4935 }
4936 (if (n != 0
4937 && (idx % width) == 0
4938 && (n % width) == 0
928686b1
RS
4939 && known_le ((idx + n) / width,
4940 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
d3e40b76
RB
4941 (with
4942 {
4943 idx = idx / width;
4944 n = n / width;
4945 /* Constructor elements can be subvectors. */
d34457c1 4946 poly_uint64 k = 1;
d3e40b76
RB
4947 if (CONSTRUCTOR_NELTS (ctor) != 0)
4948 {
4949 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
4950 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
4951 k = TYPE_VECTOR_SUBPARTS (cons_elem);
4952 }
d34457c1 4953 unsigned HOST_WIDE_INT elt, count, const_k;
d3e40b76
RB
4954 }
4955 (switch
4956 /* We keep an exact subset of the constructor elements. */
d34457c1 4957 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
d3e40b76
RB
4958 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4959 { build_constructor (type, NULL); }
d34457c1
RS
4960 (if (count == 1)
4961 (if (elt < CONSTRUCTOR_NELTS (ctor))
4c1da8ea 4962 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
d34457c1 4963 { build_zero_cst (type); })
d3e40b76 4964 {
d34457c1
RS
4965 vec<constructor_elt, va_gc> *vals;
4966 vec_alloc (vals, count);
4967 for (unsigned i = 0;
4968 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
4969 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
4970 CONSTRUCTOR_ELT (ctor, elt + i)->value);
4971 build_constructor (type, vals);
4972 })))
d3e40b76 4973 /* The bitfield references a single constructor element. */
d34457c1
RS
4974 (if (k.is_constant (&const_k)
4975 && idx + n <= (idx / const_k + 1) * const_k)
d3e40b76 4976 (switch
d34457c1 4977 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
d3e40b76 4978 { build_zero_cst (type); })
d34457c1 4979 (if (n == const_k)
4c1da8ea 4980 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
d34457c1
RS
4981 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
4982 @1 { bitsize_int ((idx % const_k) * width); })))))))))
92e29a5e
RB
4983
4984/* Simplify a bit extraction from a bit insertion for the cases with
4985 the inserted element fully covering the extraction or the insertion
4986 not touching the extraction. */
4987(simplify
4988 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
4989 (with
4990 {
4991 unsigned HOST_WIDE_INT isize;
4992 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4993 isize = TYPE_PRECISION (TREE_TYPE (@1));
4994 else
4995 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
4996 }
4997 (switch
8e6cdc90
RS
4998 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
4999 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
5000 wi::to_wide (@ipos) + isize))
92e29a5e 5001 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
8e6cdc90
RS
5002 wi::to_wide (@rpos)
5003 - wi::to_wide (@ipos)); }))
5004 (if (wi::geu_p (wi::to_wide (@ipos),
5005 wi::to_wide (@rpos) + wi::to_wide (@rsize))
5006 || wi::geu_p (wi::to_wide (@rpos),
5007 wi::to_wide (@ipos) + isize))
92e29a5e 5008 (BIT_FIELD_REF @0 @rsize @rpos)))))
/* Canonicalize fused multiply-add variants after vectorization:
   fold negations of operands or of the whole result into the
   corresponding internal functions IFN_FMA / IFN_FMS / IFN_FNMA /
   IFN_FNMS.  Result negations are only folded in when the FMA value
   has a single use, so we never duplicate the multiply-add.  */
(if (canonicalize_math_after_vectorization_p ())
 (for fmas (FMA)
  (simplify
   (fmas:c (negate @0) @1 @2)
   (IFN_FNMA @0 @1 @2))
  (simplify
   (fmas @0 @1 (negate @2))
   (IFN_FMS @0 @1 @2))
  (simplify
   (fmas:c (negate @0) @1 (negate @2))
   (IFN_FNMS @0 @1 @2))
  (simplify
   (negate (fmas@3 @0 @1 @2))
   (if (single_use (@3))
    (IFN_FNMS @0 @1 @2))))

 (simplify
  (IFN_FMS:c (negate @0) @1 @2)
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FMS @0 @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FMS:c (negate @0) @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (negate (IFN_FMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FNMA @0 @1 @2)))

 (simplify
  (IFN_FNMA:c (negate @0) @1 @2)
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FNMA @0 @1 (negate @2))
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FNMA:c (negate @0) @1 (negate @2))
  (IFN_FMS @0 @1 @2))
 (simplify
  (negate (IFN_FNMA@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMS @0 @1 @2)))

 (simplify
  (IFN_FNMS:c (negate @0) @1 @2)
  (IFN_FMS @0 @1 @2))
 (simplify
  (IFN_FNMS @0 @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (IFN_FNMS:c (negate @0) @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (negate (IFN_FNMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMA @0 @1 @2))))
/* POPCOUNT simplifications.  */
(for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
	       BUILT_IN_POPCOUNTIMAX)
  /* popcount(X&1) is nop_expr(X&1): when only the low bit can be set,
     the popcount equals that bit.  */
  (simplify
    (popcount @0)
    (if (tree_nonzero_bits (@0) == 1)
      (convert @0)))
  /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero,
     i.e. the possibly-nonzero bits of the operands do not overlap.  */
  (simplify
    (plus (popcount:s @0) (popcount:s @1))
    (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
      (popcount (bit_ior @0 @1))))
  /* popcount(X) == 0 is X == 0, and related (in)equalities
     (<= 0, != 0, > 0 map to == 0 / != 0 on X itself).  */
  (for cmp (le eq ne gt)
       rep (eq eq ne ne)
    (simplify
      (cmp (popcount @0) integer_zerop)
      (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
/* Simplify:

     a = a1 op a2
     r = c ? a : b;

   to:

     r = c ? a1 op a2 : b;

   if the target can do it in one go.  This makes the operation conditional
   on c, so could drop potentially-trapping arithmetic, but that's a valid
   simplification if the result of the operation isn't needed.  */
(for uncond_op (UNCOND_BINARY)
     cond_op (COND_BINARY)
 (simplify
  (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
 /* Mirror case: the unconditional op is in the "else" arm, so invert
    the condition and use @1 as the fallback value.  */
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
  (with { tree op_type = TREE_TYPE (@4); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
/* Same for ternary operations.  */
(for uncond_op (UNCOND_TERNARY)
     cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@5); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
 /* Mirror case: invert the condition when the op sits in the "else"
    arm.  */
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 @4
		   (view_convert:op_type @1)))))))
/* Detect cases in which a VEC_COND_EXPR effectively replaces the
   "else" value of an IFN_COND_*.  */
(for cond_op (COND_BINARY)
 /* Same condition on both: the VEC_COND_EXPR only changes the else
    value, so fold it into the conditional op directly.  */
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@3); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
 /* Inverse conditions: the cond_op in the "else" arm fires exactly
    when @0 is false, so @1 becomes its else value.  */
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (inverse_conditions_p (@0, @2)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
/* Same for ternary operations.  */
(for cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
 /* Inverse conditions: fold @1 in as the else value of the
    conditional op found in the "else" arm.  */
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
  (with { tree op_type = TREE_TYPE (@6); }
   (if (inverse_conditions_p (@0, @2)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
/* For pointers @0 and @2 and nonnegative constant offset @1, look for
   expressions like:

     A: (@0 + @1 < @2) | (@2 + @1 < @0)
     B: (@0 + @1 <= @2) | (@2 + @1 <= @0)

   If pointers are known not to wrap, B checks whether @1 bytes starting
   at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
   bytes.  A is more efficiently tested as:

     A: (sizetype) (@0 + @1 - @2) > @1 * 2

   The equivalent expression for B is given by replacing @1 with @1 - 1:

     B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2

   @0 and @2 can be swapped in both expressions without changing the result.

   The folds rely on sizetype's being unsigned (which is always true)
   and on its being the same width as the pointer (which we have to check).

   The fold replaces two pointer_plus expressions, two comparisons and
   an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
   the best case it's a saving of two operations.  The A fold retains one
   of the original pointer_pluses, so is a win even if both pointer_pluses
   are used elsewhere.  The B fold is a wash if both pointer_pluses are
   used elsewhere, since all we end up doing is replacing a comparison with
   a pointer_plus.  We do still apply the fold under those circumstances
   though, in case applying it to other conditions eventually makes one of the
   pointer_pluses dead.  */
(for ior (truth_orif truth_or bit_ior)
 (for cmp (le lt)
  (simplify
   (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
	(cmp:cs (pointer_plus@4 @2 @1) @0))
   (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
	&& TYPE_OVERFLOW_WRAPS (sizetype)
	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
    /* Calculate the rhs constant.  */
    (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
	    offset_int rhs = off * 2; }
     /* Always fails for negative values.  */
     (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
      /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
	 pick a canonical order.  This increases the chances of using the
	 same pointer_plus in multiple checks.  */
      (with { bool swap_p = tree_swap_operands_p (@0, @2);
	      tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
       (if (cmp == LT_EXPR)
	(gt (convert:sizetype
	     (pointer_diff:ssizetype { swap_p ? @4 : @3; }
				     { swap_p ? @0 : @2; }))
	    { rhs_tree; })
	(gt (convert:sizetype
	     (pointer_diff:ssizetype
	      (pointer_plus { swap_p ? @2 : @0; }
			    { wide_int_to_tree (sizetype, off); })
	      { swap_p ? @0 : @2; }))
	    { rhs_tree; })))))))))