]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/match.pd
Use more switch statements.
[thirdparty/gcc.git] / gcc / match.pd
CommitLineData
3d2cf79f
RB
1/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
4
a5544970 5 Copyright (C) 2014-2019 Free Software Foundation, Inc.
3d2cf79f
RB
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
8
9This file is part of GCC.
10
11GCC is free software; you can redistribute it and/or modify it under
12the terms of the GNU General Public License as published by the Free
13Software Foundation; either version 3, or (at your option) any later
14version.
15
16GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17WARRANTY; without even the implied warranty of MERCHANTABILITY or
18FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19for more details.
20
21You should have received a copy of the GNU General Public License
22along with GCC; see the file COPYING3. If not see
23<http://www.gnu.org/licenses/>. */
24
25
26/* Generic tree predicates we inherit. */
27(define_predicates
cc7b5acf 28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
53a19317 29 integer_each_onep integer_truep integer_nonzerop
cc7b5acf 30 real_zerop real_onep real_minus_onep
b0eb889b 31 zerop
46c66a46 32 initializer_each_zero_or_onep
f3582e54 33 CONSTANT_CLASS_P
887ab609 34 tree_expr_nonnegative_p
e36c1cfe 35 tree_expr_nonzero_p
67dbe582 36 integer_valued_real_p
53a19317 37 integer_pow2p
f06e47d7 38 uniform_integer_cst_p
21caa1a2
PK
39 HONOR_NANS
40 uniform_vector_p)
e0ee10ed 41
f84e7fd6
RB
42/* Operator lists. */
43(define_operator_list tcc_comparison
44 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
45(define_operator_list inverted_tcc_comparison
46 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
47(define_operator_list inverted_tcc_comparison_with_nans
48 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
534bd33b
MG
49(define_operator_list swapped_tcc_comparison
50 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
07cdc2b8
RB
51(define_operator_list simple_comparison lt le eq ne ge gt)
52(define_operator_list swapped_simple_comparison gt ge eq ne le lt)
53
b1dc4a20 54#include "cfn-operators.pd"
257aecb4 55
543a9bcd
RS
56/* Define operand lists for math rounding functions {,i,l,ll}FN,
57 where the versions prefixed with "i" return an int, those prefixed with
58 "l" return a long and those prefixed with "ll" return a long long.
59
60 Also define operand lists:
61
62 X<FN>F for all float functions, in the order i, l, ll
63 X<FN> for all double functions, in the same order
64 X<FN>L for all long double functions, in the same order. */
65#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
543a9bcd
RS
66 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
67 BUILT_IN_L##FN##F \
68 BUILT_IN_LL##FN##F) \
69 (define_operator_list X##FN BUILT_IN_I##FN \
70 BUILT_IN_L##FN \
71 BUILT_IN_LL##FN) \
72 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
73 BUILT_IN_L##FN##L \
74 BUILT_IN_LL##FN##L)
75
543a9bcd
RS
76DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
77DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
78DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
79DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
0d2b3bca
RS
80
81/* Binary operations and their associated IFN_COND_* function. */
82(define_operator_list UNCOND_BINARY
83 plus minus
6c4fd4a9 84 mult trunc_div trunc_mod rdiv
0d2b3bca 85 min max
20103c0e
RS
86 bit_and bit_ior bit_xor
87 lshift rshift)
0d2b3bca
RS
88(define_operator_list COND_BINARY
89 IFN_COND_ADD IFN_COND_SUB
6c4fd4a9 90 IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
0d2b3bca 91 IFN_COND_MIN IFN_COND_MAX
20103c0e
RS
92 IFN_COND_AND IFN_COND_IOR IFN_COND_XOR
93 IFN_COND_SHL IFN_COND_SHR)
b41d1f6e
RS
94
95/* Same for ternary operations. */
96(define_operator_list UNCOND_TERNARY
97 IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
98(define_operator_list COND_TERNARY
99 IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
03cc70b5 100
ed73f46f
MG
101/* As opposed to convert?, this still creates a single pattern, so
102 it is not a suitable replacement for convert? in all cases. */
103(match (nop_convert @0)
104 (convert @0)
105 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
106(match (nop_convert @0)
107 (view_convert @0)
108 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
928686b1
RS
109 && known_eq (TYPE_VECTOR_SUBPARTS (type),
110 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
ed73f46f
MG
111 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
112/* This one has to be last, or it shadows the others. */
113(match (nop_convert @0)
03cc70b5 114 @0)
f84e7fd6 115
e197e64e
KV
116/* Transform likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>
117 ABSU_EXPR returns unsigned absolute value of the operand and the operand
118 of the ABSU_EXPR will have the corresponding signed type. */
119(simplify (abs (convert @0))
120 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
121 && !TYPE_UNSIGNED (TREE_TYPE (@0))
122 && element_precision (type) > element_precision (TREE_TYPE (@0)))
123 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
124 (convert (absu:utype @0)))))
125
126
e0ee10ed 127/* Simplifications of operations with one constant operand and
36a60e48 128 simplifications to constants or single values. */
e0ee10ed
RB
129
130(for op (plus pointer_plus minus bit_ior bit_xor)
131 (simplify
132 (op @0 integer_zerop)
133 (non_lvalue @0)))
134
a499aac5
RB
135/* 0 +p index -> (type)index */
136(simplify
137 (pointer_plus integer_zerop @1)
138 (non_lvalue (convert @1)))
139
d43177ad
MG
140/* ptr - 0 -> (type)ptr */
141(simplify
142 (pointer_diff @0 integer_zerop)
143 (convert @0))
144
a7f24614
RB
145/* See if ARG1 is zero and X + ARG1 reduces to X.
146 Likewise if the operands are reversed. */
147(simplify
148 (plus:c @0 real_zerop@1)
149 (if (fold_real_zero_addition_p (type, @1, 0))
150 (non_lvalue @0)))
151
152/* See if ARG1 is zero and X - ARG1 reduces to X. */
153(simplify
154 (minus @0 real_zerop@1)
155 (if (fold_real_zero_addition_p (type, @1, 1))
156 (non_lvalue @0)))
f7b7e5d0
JJ
157
158/* Even if the fold_real_zero_addition_p can't simplify X + 0.0
159 into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
160 or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
161 if not -frounding-math. For sNaNs the first operation would raise
162 exceptions but turn the result into qNan, so the second operation
163 would not raise it. */
164(for inner_op (plus minus)
165 (for outer_op (plus minus)
166 (simplify
167 (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
168 (if (real_zerop (@1)
169 && real_zerop (@2)
170 && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
171 (with { bool inner_plus = ((inner_op == PLUS_EXPR)
172 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
173 bool outer_plus
174 = ((outer_op == PLUS_EXPR)
175 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
176 (if (outer_plus && !inner_plus)
177 (outer_op @0 @2)
178 @3))))))
a7f24614 179
e0ee10ed
RB
180/* Simplify x - x.
181 This is unsafe for certain floats even in non-IEEE formats.
182 In IEEE, it is unsafe because it does wrong for NaNs.
183 Also note that operand_equal_p is always false if an operand
184 is volatile. */
185(simplify
a7f24614 186 (minus @0 @0)
1b457aa4 187 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
a7f24614 188 { build_zero_cst (type); }))
1af4ebf5
MG
189(simplify
190 (pointer_diff @@0 @0)
191 { build_zero_cst (type); })
e0ee10ed
RB
192
193(simplify
a7f24614
RB
194 (mult @0 integer_zerop@1)
195 @1)
196
197/* Maybe fold x * 0 to 0. The expressions aren't the same
198 when x is NaN, since x * 0 is also NaN. Nor are they the
199 same in modes with signed zeros, since multiplying a
200 negative value by 0 gives -0, not +0. */
201(simplify
202 (mult @0 real_zerop@1)
8b5ee871 203 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
a7f24614
RB
204 @1))
205
206/* In IEEE floating point, x*1 is not equivalent to x for snans.
207 Likewise for complex arithmetic with signed zeros. */
208(simplify
209 (mult @0 real_onep)
8b5ee871
MG
210 (if (!HONOR_SNANS (type)
211 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
212 || !COMPLEX_FLOAT_TYPE_P (type)))
213 (non_lvalue @0)))
214
215/* Transform x * -1.0 into -x. */
216(simplify
217 (mult @0 real_minus_onep)
8b5ee871
MG
218 (if (!HONOR_SNANS (type)
219 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
220 || !COMPLEX_FLOAT_TYPE_P (type)))
221 (negate @0)))
e0ee10ed 222
ea8a6038
ML
223/* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 } */
224(simplify
225 (mult SSA_NAME@1 SSA_NAME@2)
226 (if (INTEGRAL_TYPE_P (type)
227 && get_nonzero_bits (@1) == 1
228 && get_nonzero_bits (@2) == 1)
229 (bit_and @1 @2)))
230
46c66a46
RS
231/* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...},
232 unless the target has native support for the former but not the latter. */
233(simplify
234 (mult @0 VECTOR_CST@1)
235 (if (initializer_each_zero_or_onep (@1)
236 && !HONOR_SNANS (type)
237 && !HONOR_SIGNED_ZEROS (type))
238 (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
239 (if (itype
240 && (!VECTOR_MODE_P (TYPE_MODE (type))
241 || (VECTOR_MODE_P (TYPE_MODE (itype))
242 && optab_handler (and_optab,
243 TYPE_MODE (itype)) != CODE_FOR_nothing)))
244 (view_convert (bit_and:itype (view_convert @0)
245 (ne @1 { build_zero_cst (type); })))))))
246
8c2805bb
AP
247(for cmp (gt ge lt le)
248 outp (convert convert negate negate)
249 outn (negate negate convert convert)
250 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
251 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
252 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
253 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
254 (simplify
255 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
256 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
257 && types_match (type, TREE_TYPE (@0)))
258 (switch
259 (if (types_match (type, float_type_node))
260 (BUILT_IN_COPYSIGNF @1 (outp @0)))
261 (if (types_match (type, double_type_node))
262 (BUILT_IN_COPYSIGN @1 (outp @0)))
263 (if (types_match (type, long_double_type_node))
264 (BUILT_IN_COPYSIGNL @1 (outp @0))))))
265 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
266 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
267 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
268 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
269 (simplify
270 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
271 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
272 && types_match (type, TREE_TYPE (@0)))
273 (switch
274 (if (types_match (type, float_type_node))
275 (BUILT_IN_COPYSIGNF @1 (outn @0)))
276 (if (types_match (type, double_type_node))
277 (BUILT_IN_COPYSIGN @1 (outn @0)))
278 (if (types_match (type, long_double_type_node))
279 (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
280
281/* Transform X * copysign (1.0, X) into abs(X). */
282(simplify
c6cfa2bf 283 (mult:c @0 (COPYSIGN_ALL real_onep @0))
8c2805bb
AP
284 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
285 (abs @0)))
286
287/* Transform X * copysign (1.0, -X) into -abs(X). */
288(simplify
c6cfa2bf 289 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
8c2805bb
AP
290 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
291 (negate (abs @0))))
292
293/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
294(simplify
c6cfa2bf 295 (COPYSIGN_ALL REAL_CST@0 @1)
8c2805bb 296 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
c6cfa2bf 297 (COPYSIGN_ALL (negate @0) @1)))
8c2805bb 298
5b7f6ed0 299/* X * 1, X / 1 -> X. */
e0ee10ed
RB
300(for op (mult trunc_div ceil_div floor_div round_div exact_div)
301 (simplify
302 (op @0 integer_onep)
303 (non_lvalue @0)))
304
71f82be9
JG
305/* (A / (1 << B)) -> (A >> B).
306 Only for unsigned A. For signed A, this would not preserve rounding
307 toward zero.
873140e6
JJ
308 For example: (-1 / ( 1 << B)) != -1 >> B.
309 Also also widening conversions, like:
310 (A / (unsigned long long) (1U << B)) -> (A >> B)
311 or
312 (A / (unsigned long long) (1 << B)) -> (A >> B).
313 If the left shift is signed, it can be done only if the upper bits
314 of A starting from shift's type sign bit are zero, as
315 (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
316 so it is valid only if A >> 31 is zero. */
71f82be9 317(simplify
873140e6 318 (trunc_div @0 (convert? (lshift integer_onep@1 @2)))
71f82be9
JG
319 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
320 && (!VECTOR_TYPE_P (type)
321 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
873140e6
JJ
322 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
323 && (useless_type_conversion_p (type, TREE_TYPE (@1))
324 || (element_precision (type) >= element_precision (TREE_TYPE (@1))
325 && (TYPE_UNSIGNED (TREE_TYPE (@1))
326 || (element_precision (type)
327 == element_precision (TREE_TYPE (@1)))
6d5093da
JJ
328 || (INTEGRAL_TYPE_P (type)
329 && (tree_nonzero_bits (@0)
330 & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
331 true,
332 element_precision (type))) == 0)))))
71f82be9
JG
333 (rshift @0 @2)))
334
5b7f6ed0
MG
335/* Preserve explicit divisions by 0: the C++ front-end wants to detect
336 undefined behavior in constexpr evaluation, and assuming that the division
337 traps enables better optimizations than these anyway. */
a7f24614 338(for div (trunc_div ceil_div floor_div round_div exact_div)
5b7f6ed0
MG
339 /* 0 / X is always zero. */
340 (simplify
341 (div integer_zerop@0 @1)
342 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
343 (if (!integer_zerop (@1))
344 @0))
da186c1f 345 /* X / -1 is -X. */
a7f24614 346 (simplify
09240451
MG
347 (div @0 integer_minus_onep@1)
348 (if (!TYPE_UNSIGNED (type))
da186c1f 349 (negate @0)))
5b7f6ed0
MG
350 /* X / X is one. */
351 (simplify
352 (div @0 @0)
9ebce098
JJ
353 /* But not for 0 / 0 so that we can get the proper warnings and errors.
354 And not for _Fract types where we can't build 1. */
355 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
5b7f6ed0 356 { build_one_cst (type); }))
03cc70b5 357 /* X / abs (X) is X < 0 ? -1 : 1. */
da186c1f 358 (simplify
d96a5585
RB
359 (div:C @0 (abs @0))
360 (if (INTEGRAL_TYPE_P (type)
da186c1f
RB
361 && TYPE_OVERFLOW_UNDEFINED (type))
362 (cond (lt @0 { build_zero_cst (type); })
363 { build_minus_one_cst (type); } { build_one_cst (type); })))
364 /* X / -X is -1. */
365 (simplify
d96a5585 366 (div:C @0 (negate @0))
da186c1f
RB
367 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
368 && TYPE_OVERFLOW_UNDEFINED (type))
369 { build_minus_one_cst (type); })))
a7f24614
RB
370
371/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
372 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
373(simplify
374 (floor_div @0 @1)
09240451
MG
375 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
376 && TYPE_UNSIGNED (type))
a7f24614
RB
377 (trunc_div @0 @1)))
378
28093105
RB
379/* Combine two successive divisions. Note that combining ceil_div
380 and floor_div is trickier and combining round_div even more so. */
381(for div (trunc_div exact_div)
c306cfaf 382 (simplify
98610dc5 383 (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
c306cfaf 384 (with {
4a669ac3 385 wi::overflow_type overflow;
8e6cdc90 386 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
4a669ac3 387 TYPE_SIGN (type), &overflow);
c306cfaf 388 }
98610dc5
JJ
389 (if (div == EXACT_DIV_EXPR
390 || optimize_successive_divisions_p (@2, @3))
391 (if (!overflow)
392 (div @0 { wide_int_to_tree (type, mul); })
393 (if (TYPE_UNSIGNED (type)
394 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
395 { build_zero_cst (type); }))))))
c306cfaf 396
288fe52e
AM
397/* Combine successive multiplications. Similar to above, but handling
398 overflow is different. */
399(simplify
400 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
401 (with {
4a669ac3 402 wi::overflow_type overflow;
8e6cdc90 403 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
4a669ac3 404 TYPE_SIGN (type), &overflow);
288fe52e
AM
405 }
406 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
407 otherwise undefined overflow implies that @0 must be zero. */
4a669ac3 408 (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
288fe52e
AM
409 (mult @0 { wide_int_to_tree (type, mul); }))))
410
a7f24614 411/* Optimize A / A to 1.0 if we don't care about
09240451 412 NaNs or Infinities. */
a7f24614
RB
413(simplify
414 (rdiv @0 @0)
09240451 415 (if (FLOAT_TYPE_P (type)
1b457aa4 416 && ! HONOR_NANS (type)
8b5ee871 417 && ! HONOR_INFINITIES (type))
09240451
MG
418 { build_one_cst (type); }))
419
420/* Optimize -A / A to -1.0 if we don't care about
421 NaNs or Infinities. */
422(simplify
e04d2a35 423 (rdiv:C @0 (negate @0))
09240451 424 (if (FLOAT_TYPE_P (type)
1b457aa4 425 && ! HONOR_NANS (type)
8b5ee871 426 && ! HONOR_INFINITIES (type))
09240451 427 { build_minus_one_cst (type); }))
a7f24614 428
8c6961ca
PK
429/* PR71078: x / abs(x) -> copysign (1.0, x) */
430(simplify
431 (rdiv:C (convert? @0) (convert? (abs @0)))
432 (if (SCALAR_FLOAT_TYPE_P (type)
433 && ! HONOR_NANS (type)
434 && ! HONOR_INFINITIES (type))
435 (switch
436 (if (types_match (type, float_type_node))
437 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
438 (if (types_match (type, double_type_node))
439 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
440 (if (types_match (type, long_double_type_node))
441 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
442
a7f24614
RB
443/* In IEEE floating point, x/1 is not equivalent to x for snans. */
444(simplify
445 (rdiv @0 real_onep)
8b5ee871 446 (if (!HONOR_SNANS (type))
a7f24614
RB
447 (non_lvalue @0)))
448
449/* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
450(simplify
451 (rdiv @0 real_minus_onep)
8b5ee871 452 (if (!HONOR_SNANS (type))
a7f24614
RB
453 (negate @0)))
454
5711ac88 455(if (flag_reciprocal_math)
81825e28 456 /* Convert (A/B)/C to A/(B*C). */
5711ac88
N
457 (simplify
458 (rdiv (rdiv:s @0 @1) @2)
81825e28
WD
459 (rdiv @0 (mult @1 @2)))
460
461 /* Canonicalize x / (C1 * y) to (x * C2) / y. */
462 (simplify
463 (rdiv @0 (mult:s @1 REAL_CST@2))
464 (with
465 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
466 (if (tem)
467 (rdiv (mult @0 { tem; } ) @1))))
5711ac88
N
468
469 /* Convert A/(B/C) to (A/B)*C */
470 (simplify
471 (rdiv @0 (rdiv:s @1 @2))
472 (mult (rdiv @0 @1) @2)))
473
6a435314
WD
474/* Simplify x / (- y) to -x / y. */
475(simplify
476 (rdiv @0 (negate @1))
477 (rdiv (negate @0) @1))
478
5e21d765
WD
479(if (flag_unsafe_math_optimizations)
480 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
481 Since C / x may underflow to zero, do this only for unsafe math. */
482 (for op (lt le gt ge)
483 neg_op (gt ge lt le)
484 (simplify
485 (op (rdiv REAL_CST@0 @1) real_zerop@2)
486 (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
487 (switch
488 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
489 (op @1 @2))
490 /* For C < 0, use the inverted operator. */
491 (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
492 (neg_op @1 @2)))))))
493
5711ac88
N
494/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
495(for div (trunc_div ceil_div floor_div round_div exact_div)
496 (simplify
497 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
498 (if (integer_pow2p (@2)
499 && tree_int_cst_sgn (@2) > 0
a1488398 500 && tree_nop_conversion_p (type, TREE_TYPE (@0))
8e6cdc90
RS
501 && wi::to_wide (@2) + wi::to_wide (@1) == 0)
502 (rshift (convert @0)
503 { build_int_cst (integer_type_node,
504 wi::exact_log2 (wi::to_wide (@2))); }))))
5711ac88 505
a7f24614
RB
506/* If ARG1 is a constant, we can convert this to a multiply by the
507 reciprocal. This does not have the same rounding properties,
508 so only do this if -freciprocal-math. We can actually
509 always safely do it if ARG1 is a power of two, but it's hard to
510 tell if it is or not in a portable manner. */
511(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
512 (simplify
513 (rdiv @0 cst@1)
514 (if (optimize)
53bc4b3a
RB
515 (if (flag_reciprocal_math
516 && !real_zerop (@1))
a7f24614 517 (with
249700b5 518 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
a7f24614 519 (if (tem)
8fdc6c67
RB
520 (mult @0 { tem; } )))
521 (if (cst != COMPLEX_CST)
522 (with { tree inverse = exact_inverse (type, @1); }
523 (if (inverse)
524 (mult @0 { inverse; } ))))))))
a7f24614 525
a7f24614 526(for mod (ceil_mod floor_mod round_mod trunc_mod)
e0ee10ed
RB
527 /* 0 % X is always zero. */
528 (simplify
a7f24614 529 (mod integer_zerop@0 @1)
e0ee10ed
RB
530 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
531 (if (!integer_zerop (@1))
532 @0))
533 /* X % 1 is always zero. */
534 (simplify
a7f24614
RB
535 (mod @0 integer_onep)
536 { build_zero_cst (type); })
537 /* X % -1 is zero. */
538 (simplify
09240451
MG
539 (mod @0 integer_minus_onep@1)
540 (if (!TYPE_UNSIGNED (type))
bc4315fb 541 { build_zero_cst (type); }))
5b7f6ed0
MG
542 /* X % X is zero. */
543 (simplify
544 (mod @0 @0)
545 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
546 (if (!integer_zerop (@0))
547 { build_zero_cst (type); }))
bc4315fb
MG
548 /* (X % Y) % Y is just X % Y. */
549 (simplify
550 (mod (mod@2 @0 @1) @1)
98e30e51
RB
551 @2)
552 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
553 (simplify
554 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
555 (if (ANY_INTEGRAL_TYPE_P (type)
556 && TYPE_OVERFLOW_UNDEFINED (type)
8e6cdc90
RS
557 && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
558 TYPE_SIGN (type)))
392750c5
JJ
559 { build_zero_cst (type); }))
560 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
561 modulo and comparison, since it is simpler and equivalent. */
562 (for cmp (eq ne)
563 (simplify
564 (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
565 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
566 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
567 (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
a7f24614
RB
568
569/* X % -C is the same as X % C. */
570(simplify
571 (trunc_mod @0 INTEGER_CST@1)
572 (if (TYPE_SIGN (type) == SIGNED
573 && !TREE_OVERFLOW (@1)
8e6cdc90 574 && wi::neg_p (wi::to_wide (@1))
a7f24614
RB
575 && !TYPE_OVERFLOW_TRAPS (type)
576 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
577 && !sign_bit_p (@1, @1))
578 (trunc_mod @0 (negate @1))))
e0ee10ed 579
8f0c696a
RB
580/* X % -Y is the same as X % Y. */
581(simplify
582 (trunc_mod @0 (convert? (negate @1)))
a2a743a1
MP
583 (if (INTEGRAL_TYPE_P (type)
584 && !TYPE_UNSIGNED (type)
8f0c696a 585 && !TYPE_OVERFLOW_TRAPS (type)
20b8d734
JJ
586 && tree_nop_conversion_p (type, TREE_TYPE (@1))
587 /* Avoid this transformation if X might be INT_MIN or
588 Y might be -1, because we would then change valid
589 INT_MIN % -(-1) into invalid INT_MIN % -1. */
8e6cdc90 590 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
20b8d734
JJ
591 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
592 (TREE_TYPE (@1))))))
8f0c696a
RB
593 (trunc_mod @0 (convert @1))))
594
f461569a
MP
595/* X - (X / Y) * Y is the same as X % Y. */
596(simplify
2eef1fc1
RB
597 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
598 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
fba46f03 599 (convert (trunc_mod @0 @1))))
f461569a 600
8f0c696a
RB
601/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
602 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
603 Also optimize A % (C << N) where C is a power of 2,
604 to A & ((C << N) - 1). */
605(match (power_of_two_cand @1)
606 INTEGER_CST@1)
607(match (power_of_two_cand @1)
608 (lshift INTEGER_CST@1 @2))
609(for mod (trunc_mod floor_mod)
610 (simplify
4ab1e111 611 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
8f0c696a
RB
612 (if ((TYPE_UNSIGNED (type)
613 || tree_expr_nonnegative_p (@0))
4ab1e111 614 && tree_nop_conversion_p (type, TREE_TYPE (@3))
8f0c696a 615 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
4ab1e111 616 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
8f0c696a 617
887ab609
N
618/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
619(simplify
620 (trunc_div (mult @0 integer_pow2p@1) @1)
621 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
622 (bit_and @0 { wide_int_to_tree
8e6cdc90
RS
623 (type, wi::mask (TYPE_PRECISION (type)
624 - wi::exact_log2 (wi::to_wide (@1)),
887ab609
N
625 false, TYPE_PRECISION (type))); })))
626
5f8d832e
N
627/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
628(simplify
629 (mult (trunc_div @0 integer_pow2p@1) @1)
630 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
631 (bit_and @0 (negate @1))))
632
95765f36
N
633/* Simplify (t * 2) / 2) -> t. */
634(for div (trunc_div ceil_div floor_div round_div exact_div)
635 (simplify
55d84e61 636 (div (mult:c @0 @1) @1)
95765f36
N
637 (if (ANY_INTEGRAL_TYPE_P (type)
638 && TYPE_OVERFLOW_UNDEFINED (type))
639 @0)))
640
d202f9bd 641(for op (negate abs)
9b054b08
RS
642 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
643 (for coss (COS COSH)
644 (simplify
645 (coss (op @0))
646 (coss @0)))
647 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
648 (for pows (POW)
649 (simplify
650 (pows (op @0) REAL_CST@1)
651 (with { HOST_WIDE_INT n; }
652 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
5d3498b4 653 (pows @0 @1)))))
de3fbea3
RB
654 /* Likewise for powi. */
655 (for pows (POWI)
656 (simplify
657 (pows (op @0) INTEGER_CST@1)
8e6cdc90 658 (if ((wi::to_wide (@1) & 1) == 0)
de3fbea3 659 (pows @0 @1))))
5d3498b4
RS
660 /* Strip negate and abs from both operands of hypot. */
661 (for hypots (HYPOT)
662 (simplify
663 (hypots (op @0) @1)
664 (hypots @0 @1))
665 (simplify
666 (hypots @0 (op @1))
667 (hypots @0 @1)))
668 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
c6cfa2bf 669 (for copysigns (COPYSIGN_ALL)
5d3498b4
RS
670 (simplify
671 (copysigns (op @0) @1)
672 (copysigns @0 @1))))
673
674/* abs(x)*abs(x) -> x*x. Should be valid for all types. */
675(simplify
676 (mult (abs@1 @0) @1)
677 (mult @0 @0))
678
64f7ea7c
KV
679/* Convert absu(x)*absu(x) -> x*x. */
680(simplify
681 (mult (absu@1 @0) @1)
682 (mult (convert@2 @0) @2))
683
5d3498b4
RS
684/* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
685(for coss (COS COSH)
686 copysigns (COPYSIGN)
687 (simplify
688 (coss (copysigns @0 @1))
689 (coss @0)))
690
691/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
692(for pows (POW)
693 copysigns (COPYSIGN)
694 (simplify
de3fbea3 695 (pows (copysigns @0 @2) REAL_CST@1)
5d3498b4
RS
696 (with { HOST_WIDE_INT n; }
697 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
698 (pows @0 @1)))))
de3fbea3
RB
699/* Likewise for powi. */
700(for pows (POWI)
701 copysigns (COPYSIGN)
702 (simplify
703 (pows (copysigns @0 @2) INTEGER_CST@1)
8e6cdc90 704 (if ((wi::to_wide (@1) & 1) == 0)
de3fbea3 705 (pows @0 @1))))
5d3498b4
RS
706
707(for hypots (HYPOT)
708 copysigns (COPYSIGN)
709 /* hypot(copysign(x, y), z) -> hypot(x, z). */
710 (simplify
711 (hypots (copysigns @0 @1) @2)
712 (hypots @0 @2))
713 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
714 (simplify
715 (hypots @0 (copysigns @1 @2))
716 (hypots @0 @1)))
717
eeb57981 718/* copysign(x, CST) -> [-]abs (x). */
c6cfa2bf 719(for copysigns (COPYSIGN_ALL)
eeb57981
RB
720 (simplify
721 (copysigns @0 REAL_CST@1)
722 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
723 (negate (abs @0))
724 (abs @0))))
725
5d3498b4 726/* copysign(copysign(x, y), z) -> copysign(x, z). */
c6cfa2bf 727(for copysigns (COPYSIGN_ALL)
5d3498b4
RS
728 (simplify
729 (copysigns (copysigns @0 @1) @2)
730 (copysigns @0 @2)))
731
732/* copysign(x,y)*copysign(x,y) -> x*x. */
c6cfa2bf 733(for copysigns (COPYSIGN_ALL)
5d3498b4
RS
734 (simplify
735 (mult (copysigns@2 @0 @1) @2)
736 (mult @0 @0)))
737
738/* ccos(-x) -> ccos(x). Similarly for ccosh. */
739(for ccoss (CCOS CCOSH)
740 (simplify
741 (ccoss (negate @0))
742 (ccoss @0)))
d202f9bd 743
abcc43f5
RS
744/* cabs(-x) and cos(conj(x)) -> cabs(x). */
745(for ops (conj negate)
746 (for cabss (CABS)
747 (simplify
748 (cabss (ops @0))
749 (cabss @0))))
750
0a8f32b8
RB
751/* Fold (a * (1 << b)) into (a << b) */
752(simplify
753 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
754 (if (! FLOAT_TYPE_P (type)
9ff6fb6e 755 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
0a8f32b8
RB
756 (lshift @0 @2)))
757
4349b15f
SD
758/* Fold (1 << (C - x)) where C = precision(type) - 1
759 into ((1 << C) >> x). */
760(simplify
761 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
762 (if (INTEGRAL_TYPE_P (type)
56ccfbd6 763 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
4349b15f
SD
764 && single_use (@1))
765 (if (TYPE_UNSIGNED (type))
766 (rshift (lshift @0 @2) @3)
767 (with
768 { tree utype = unsigned_type_for (type); }
769 (convert (rshift (lshift (convert:utype @0) @2) @3))))))
770
0a8f32b8
RB
771/* Fold (C1/X)*C2 into (C1*C2)/X. */
772(simplify
ff86345f
RB
773 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
774 (if (flag_associative_math
775 && single_use (@3))
0a8f32b8
RB
776 (with
777 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
778 (if (tem)
779 (rdiv { tem; } @1)))))
780
781/* Simplify ~X & X as zero. */
782(simplify
783 (bit_and:c (convert? @0) (convert? (bit_not @0)))
784 { build_zero_cst (type); })
785
89b80c42
PK
786/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
787(simplify
788 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
789 (if (TYPE_UNSIGNED (type))
790 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
791
7aa13860
PK
792(for bitop (bit_and bit_ior)
793 cmp (eq ne)
a93952d2
JJ
794 /* PR35691: Transform
795 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
796 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
7aa13860
PK
797 (simplify
798 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
799 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
a93952d2
JJ
800 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
801 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
802 (cmp (bit_ior @0 (convert @1)) @2)))
803 /* Transform:
804 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
805 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
806 (simplify
807 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
808 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
809 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
810 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
811 (cmp (bit_and @0 (convert @1)) @2))))
7aa13860 812
10158317
RB
813/* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
814(simplify
a9658b11 815 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
10158317
RB
816 (minus (bit_xor @0 @1) @1))
817(simplify
818 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
8e6cdc90 819 (if (~wi::to_wide (@2) == wi::to_wide (@1))
10158317
RB
820 (minus (bit_xor @0 @1) @1)))
821
822/* Fold (A & B) - (A & ~B) into B - (A ^ B). */
823(simplify
a8e9f9a3 824 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
10158317
RB
825 (minus @1 (bit_xor @0 @1)))
826
42bd89ce
MG
827/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
828(for op (bit_ior bit_xor plus)
829 (simplify
830 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
831 (bit_xor @0 @1))
832 (simplify
833 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
8e6cdc90 834 (if (~wi::to_wide (@2) == wi::to_wide (@1))
42bd89ce 835 (bit_xor @0 @1))))
2066ef6a
PK
836
837/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
838(simplify
839 (bit_ior:c (bit_xor:c @0 @1) @0)
840 (bit_ior @0 @1))
841
e268a77b
MG
842/* (a & ~b) | (a ^ b) --> a ^ b */
843(simplify
844 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
845 @2)
846
847/* (a & ~b) ^ ~a --> ~(a & b) */
848(simplify
849 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
850 (bit_not (bit_and @0 @1)))
851
52792faa
KK
852/* (~a & b) ^ a --> (a | b) */
853(simplify
854 (bit_xor:c (bit_and:cs (bit_not @0) @1) @0)
855 (bit_ior @0 @1))
856
e268a77b
MG
857/* (a | b) & ~(a ^ b) --> a & b */
858(simplify
859 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
860 (bit_and @0 @1))
861
862/* a | ~(a ^ b) --> a | ~b */
863(simplify
864 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
865 (bit_ior @0 (bit_not @1)))
866
867/* (a | b) | (a &^ b) --> a | b */
868(for op (bit_and bit_xor)
869 (simplify
870 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
871 @2))
872
873/* (a & b) | ~(a ^ b) --> ~(a ^ b) */
874(simplify
875 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
876 @2)
877
878/* ~(~a & b) --> a | ~b */
879(simplify
880 (bit_not (bit_and:cs (bit_not @0) @1))
881 (bit_ior @0 (bit_not @1)))
882
fd8303a5
MC
883/* ~(~a | b) --> a & ~b */
884(simplify
885 (bit_not (bit_ior:cs (bit_not @0) @1))
886 (bit_and @0 (bit_not @1)))
887
d982c5b7
MG
888/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
889#if GIMPLE
890(simplify
891 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
892 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8e6cdc90 893 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
d982c5b7
MG
894 (bit_xor @0 @1)))
895#endif
10158317 896
f2901002
JJ
897/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
898 ((A & N) + B) & M -> (A + B) & M
899 Similarly if (N & M) == 0,
900 ((A | N) + B) & M -> (A + B) & M
901 and for - instead of + (or unary - instead of +)
902 and/or ^ instead of |.
903 If B is constant and (B & M) == 0, fold into A & M. */
904(for op (plus minus)
905 (for bitop (bit_and bit_ior bit_xor)
906 (simplify
907 (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
908 (with
909 { tree pmop[2];
910 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
911 @3, @4, @1, ERROR_MARK, NULL_TREE,
912 NULL_TREE, pmop); }
913 (if (utype)
914 (convert (bit_and (op (convert:utype { pmop[0]; })
915 (convert:utype { pmop[1]; }))
916 (convert:utype @2))))))
917 (simplify
918 (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
919 (with
920 { tree pmop[2];
921 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
922 NULL_TREE, NULL_TREE, @1, bitop, @3,
923 @4, pmop); }
924 (if (utype)
925 (convert (bit_and (op (convert:utype { pmop[0]; })
926 (convert:utype { pmop[1]; }))
927 (convert:utype @2)))))))
928 (simplify
929 (bit_and (op:s @0 @1) INTEGER_CST@2)
930 (with
931 { tree pmop[2];
932 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
933 NULL_TREE, NULL_TREE, @1, ERROR_MARK,
934 NULL_TREE, NULL_TREE, pmop); }
935 (if (utype)
936 (convert (bit_and (op (convert:utype { pmop[0]; })
937 (convert:utype { pmop[1]; }))
938 (convert:utype @2)))))))
939(for bitop (bit_and bit_ior bit_xor)
940 (simplify
941 (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
942 (with
943 { tree pmop[2];
944 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
945 bitop, @2, @3, NULL_TREE, ERROR_MARK,
946 NULL_TREE, NULL_TREE, pmop); }
947 (if (utype)
948 (convert (bit_and (negate (convert:utype { pmop[0]; }))
949 (convert:utype @1)))))))
950
bc4315fb
MG
951/* X % Y is smaller than Y. */
952(for cmp (lt ge)
953 (simplify
954 (cmp (trunc_mod @0 @1) @1)
955 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
956 { constant_boolean_node (cmp == LT_EXPR, type); })))
957(for cmp (gt le)
958 (simplify
959 (cmp @1 (trunc_mod @0 @1))
960 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
961 { constant_boolean_node (cmp == GT_EXPR, type); })))
962
e0ee10ed
RB
963/* x | ~0 -> ~0 */
964(simplify
ca0b7ece
RB
965 (bit_ior @0 integer_all_onesp@1)
966 @1)
967
968/* x | 0 -> x */
969(simplify
970 (bit_ior @0 integer_zerop)
971 @0)
e0ee10ed
RB
972
973/* x & 0 -> 0 */
974(simplify
ca0b7ece
RB
975 (bit_and @0 integer_zerop@1)
976 @1)
e0ee10ed 977
a4398a30 978/* ~x | x -> -1 */
8b5ee871
MG
979/* ~x ^ x -> -1 */
980/* ~x + x -> -1 */
981(for op (bit_ior bit_xor plus)
982 (simplify
983 (op:c (convert? @0) (convert? (bit_not @0)))
984 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
a4398a30 985
e0ee10ed
RB
986/* x ^ x -> 0 */
987(simplify
988 (bit_xor @0 @0)
989 { build_zero_cst (type); })
990
36a60e48
RB
991/* Canonicalize X ^ ~0 to ~X. */
992(simplify
993 (bit_xor @0 integer_all_onesp@1)
994 (bit_not @0))
995
996/* x & ~0 -> x */
997(simplify
998 (bit_and @0 integer_all_onesp)
999 (non_lvalue @0))
1000
1001/* x & x -> x, x | x -> x */
1002(for bitop (bit_and bit_ior)
1003 (simplify
1004 (bitop @0 @0)
1005 (non_lvalue @0)))
1006
c7986356
MG
1007/* x & C -> x if we know that x & ~C == 0. */
1008#if GIMPLE
1009(simplify
1010 (bit_and SSA_NAME@0 INTEGER_CST@1)
1011 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
8e6cdc90 1012 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
c7986356
MG
1013 @0))
1014#endif
1015
0f770b01
RV
1016/* x + (x & 1) -> (x + 1) & ~1 */
1017(simplify
44fc0a51
RB
1018 (plus:c @0 (bit_and:s @0 integer_onep@1))
1019 (bit_and (plus @0 @1) (bit_not @1)))
0f770b01
RV
1020
1021/* x & ~(x & y) -> x & ~y */
1022/* x | ~(x | y) -> x | ~y */
1023(for bitop (bit_and bit_ior)
af563d4b 1024 (simplify
44fc0a51
RB
1025 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
1026 (bitop @0 (bit_not @1))))
af563d4b 1027
03cc70b5
MC
1028/* (~x & y) | ~(x | y) -> ~x */
1029(simplify
1030 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
1031 @2)
1032
1033/* (x | y) ^ (x | ~y) -> ~x */
1034(simplify
1035 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
1036 (bit_not @0))
1037
1038/* (x & y) | ~(x | y) -> ~(x ^ y) */
1039(simplify
1040 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1041 (bit_not (bit_xor @0 @1)))
1042
1043/* (~x | y) ^ (x ^ y) -> x | ~y */
1044(simplify
1045 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
1046 (bit_ior @0 (bit_not @1)))
1047
1048/* (x ^ y) | ~(x | y) -> ~(x & y) */
1049(simplify
1050 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1051 (bit_not (bit_and @0 @1)))
1052
af563d4b
MG
1053/* (x | y) & ~x -> y & ~x */
1054/* (x & y) | ~x -> y | ~x */
1055(for bitop (bit_and bit_ior)
1056 rbitop (bit_ior bit_and)
1057 (simplify
1058 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
1059 (bitop @1 @2)))
0f770b01 1060
f13c4673
MP
1061/* (x & y) ^ (x | y) -> x ^ y */
1062(simplify
2d6f2dce
MP
1063 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
1064 (bit_xor @0 @1))
f13c4673 1065
9ea65ca6
MP
1066/* (x ^ y) ^ (x | y) -> x & y */
1067(simplify
1068 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
1069 (bit_and @0 @1))
1070
1071/* (x & y) + (x ^ y) -> x | y */
1072/* (x & y) | (x ^ y) -> x | y */
1073/* (x & y) ^ (x ^ y) -> x | y */
1074(for op (plus bit_ior bit_xor)
1075 (simplify
1076 (op:c (bit_and @0 @1) (bit_xor @0 @1))
1077 (bit_ior @0 @1)))
1078
1079/* (x & y) + (x | y) -> x + y */
1080(simplify
1081 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
1082 (plus @0 @1))
1083
9737efaf
MP
1084/* (x + y) - (x | y) -> x & y */
1085(simplify
1086 (minus (plus @0 @1) (bit_ior @0 @1))
1087 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1088 && !TYPE_SATURATING (type))
1089 (bit_and @0 @1)))
1090
1091/* (x + y) - (x & y) -> x | y */
1092(simplify
1093 (minus (plus @0 @1) (bit_and @0 @1))
1094 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1095 && !TYPE_SATURATING (type))
1096 (bit_ior @0 @1)))
1097
9ea65ca6
MP
1098/* (x | y) - (x ^ y) -> x & y */
1099(simplify
1100 (minus (bit_ior @0 @1) (bit_xor @0 @1))
1101 (bit_and @0 @1))
1102
1103/* (x | y) - (x & y) -> x ^ y */
1104(simplify
1105 (minus (bit_ior @0 @1) (bit_and @0 @1))
1106 (bit_xor @0 @1))
1107
66cc6273
MP
1108/* (x | y) & ~(x & y) -> x ^ y */
1109(simplify
1110 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
1111 (bit_xor @0 @1))
1112
1113/* (x | y) & (~x ^ y) -> x & y */
1114(simplify
1115 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
1116 (bit_and @0 @1))
1117
fd8303a5
MC
1118/* (~x | y) & (x | ~y) -> ~(x ^ y) */
1119(simplify
1120 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
1121 (bit_not (bit_xor @0 @1)))
1122
1123/* (~x | y) ^ (x | ~y) -> x ^ y */
1124(simplify
1125 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
1126 (bit_xor @0 @1))
1127
5b00d921
RB
1128/* ~x & ~y -> ~(x | y)
1129 ~x | ~y -> ~(x & y) */
1130(for op (bit_and bit_ior)
1131 rop (bit_ior bit_and)
1132 (simplify
1133 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
ece46666
MG
1134 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1135 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
5b00d921
RB
1136 (bit_not (rop (convert @0) (convert @1))))))
1137
14ea9f92 1138/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
5b00d921
RB
1139 with a constant, and the two constants have no bits in common,
1140 we should treat this as a BIT_IOR_EXPR since this may produce more
1141 simplifications. */
14ea9f92
RB
1142(for op (bit_xor plus)
1143 (simplify
1144 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
1145 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
1146 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1147 && tree_nop_conversion_p (type, TREE_TYPE (@2))
8e6cdc90 1148 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
14ea9f92 1149 (bit_ior (convert @4) (convert @5)))))
5b00d921
RB
1150
1151/* (X | Y) ^ X -> Y & ~ X*/
1152(simplify
2eef1fc1 1153 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
5b00d921
RB
1154 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1155 (convert (bit_and @1 (bit_not @0)))))
1156
1157/* Convert ~X ^ ~Y to X ^ Y. */
1158(simplify
1159 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
ece46666
MG
1160 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1161 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
5b00d921
RB
1162 (bit_xor (convert @0) (convert @1))))
1163
1164/* Convert ~X ^ C to X ^ ~C. */
1165(simplify
1166 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
c8ba6498
EB
1167 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1168 (bit_xor (convert @0) (bit_not @1))))
5b00d921 1169
e39dab2c
MG
1170/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
1171(for opo (bit_and bit_xor)
1172 opi (bit_xor bit_and)
1173 (simplify
de5b5228 1174 (opo:c (opi:cs @0 @1) @1)
e39dab2c 1175 (bit_and (bit_not @0) @1)))
97e77391 1176
14ea9f92
RB
1177/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
1178 operands are another bit-wise operation with a common input. If so,
1179 distribute the bit operations to save an operation and possibly two if
1180 constants are involved. For example, convert
1181 (A | B) & (A | C) into A | (B & C)
1182 Further simplification will occur if B and C are constants. */
e07ab2fe
MG
1183(for op (bit_and bit_ior bit_xor)
1184 rop (bit_ior bit_and bit_and)
14ea9f92 1185 (simplify
2eef1fc1 1186 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
e07ab2fe
MG
1187 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1188 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
14ea9f92
RB
1189 (rop (convert @0) (op (convert @1) (convert @2))))))
1190
e39dab2c
MG
1191/* Some simple reassociation for bit operations, also handled in reassoc. */
1192/* (X & Y) & Y -> X & Y
1193 (X | Y) | Y -> X | Y */
1194(for op (bit_and bit_ior)
1195 (simplify
2eef1fc1 1196 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
e39dab2c
MG
1197 @2))
1198/* (X ^ Y) ^ Y -> X */
1199(simplify
2eef1fc1 1200 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
ece46666 1201 (convert @0))
e39dab2c
MG
1202/* (X & Y) & (X & Z) -> (X & Y) & Z
1203 (X | Y) | (X | Z) -> (X | Y) | Z */
1204(for op (bit_and bit_ior)
1205 (simplify
6c35e5b0 1206 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
e39dab2c
MG
1207 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1208 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1209 (if (single_use (@5) && single_use (@6))
1210 (op @3 (convert @2))
1211 (if (single_use (@3) && single_use (@4))
1212 (op (convert @1) @5))))))
1213/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
1214(simplify
1215 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
1216 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1217 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
d78789f5 1218 (bit_xor (convert @1) (convert @2))))
5b00d921 1219
64f7ea7c
KV
1220/* Convert abs (abs (X)) into abs (X).
1221 also absu (absu (X)) into absu (X). */
b14a9c57
RB
1222(simplify
1223 (abs (abs@1 @0))
1224 @1)
64f7ea7c
KV
1225
1226(simplify
1227 (absu (convert@2 (absu@1 @0)))
1228 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
1229 @1))
1230
1231/* Convert abs[u] (-X) -> abs[u] (X). */
f3582e54
RB
1232(simplify
1233 (abs (negate @0))
1234 (abs @0))
64f7ea7c
KV
1235
1236(simplify
1237 (absu (negate @0))
1238 (absu @0))
1239
1240/* Convert abs[u] (X) where X is nonnegative -> (X). */
f3582e54
RB
1241(simplify
1242 (abs tree_expr_nonnegative_p@0)
1243 @0)
1244
64f7ea7c
KV
1245(simplify
1246 (absu tree_expr_nonnegative_p@0)
1247 (convert @0))
1248
55cf3946
RB
1249/* A few cases of fold-const.c negate_expr_p predicate. */
1250(match negate_expr_p
1251 INTEGER_CST
b14a9c57 1252 (if ((INTEGRAL_TYPE_P (type)
56a6d474 1253 && TYPE_UNSIGNED (type))
b14a9c57 1254 || (!TYPE_OVERFLOW_SANITIZED (type)
55cf3946
RB
1255 && may_negate_without_overflow_p (t)))))
1256(match negate_expr_p
1257 FIXED_CST)
1258(match negate_expr_p
1259 (negate @0)
1260 (if (!TYPE_OVERFLOW_SANITIZED (type))))
1261(match negate_expr_p
1262 REAL_CST
1263 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
1264/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
1265 ways. */
1266(match negate_expr_p
1267 VECTOR_CST
1268 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
81bd903a
MG
1269(match negate_expr_p
1270 (minus @0 @1)
1271 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
1272 || (FLOAT_TYPE_P (type)
1273 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1274 && !HONOR_SIGNED_ZEROS (type)))))
0a8f32b8
RB
1275
1276/* (-A) * (-B) -> A * B */
1277(simplify
1278 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
1279 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1280 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1281 (mult (convert @0) (convert (negate @1)))))
03cc70b5 1282
55cf3946 1283/* -(A + B) -> (-B) - A. */
b14a9c57 1284(simplify
55cf3946
RB
1285 (negate (plus:c @0 negate_expr_p@1))
1286 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
1287 && !HONOR_SIGNED_ZEROS (element_mode (type)))
1288 (minus (negate @1) @0)))
1289
81bd903a
MG
1290/* -(A - B) -> B - A. */
1291(simplify
1292 (negate (minus @0 @1))
1293 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
1294 || (FLOAT_TYPE_P (type)
1295 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1296 && !HONOR_SIGNED_ZEROS (type)))
1297 (minus @1 @0)))
1af4ebf5
MG
1298(simplify
1299 (negate (pointer_diff @0 @1))
1300 (if (TYPE_OVERFLOW_UNDEFINED (type))
1301 (pointer_diff @1 @0)))
81bd903a 1302
55cf3946 1303/* A - B -> A + (-B) if B is easily negatable. */
b14a9c57 1304(simplify
55cf3946 1305 (minus @0 negate_expr_p@1)
e4e96a4f
KT
1306 (if (!FIXED_POINT_TYPE_P (type))
1307 (plus @0 (negate @1))))
d4573ffe 1308
5609420f
RB
1309/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
1310 when profitable.
1311 For bitwise binary operations apply operand conversions to the
1312 binary operation result instead of to the operands. This allows
1313 to combine successive conversions and bitwise binary operations.
1314 We combine the above two cases by using a conditional convert. */
1315(for bitop (bit_and bit_ior bit_xor)
1316 (simplify
1317 (bitop (convert @0) (convert? @1))
1318 (if (((TREE_CODE (@1) == INTEGER_CST
1319 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
ad6f996c 1320 && int_fits_type_p (@1, TREE_TYPE (@0)))
aea417d7 1321 || types_match (@0, @1))
ad6f996c
RB
1322 /* ??? This transform conflicts with fold-const.c doing
1323 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
1324 constants (if x has signed type, the sign bit cannot be set
1325 in c). This folds extension into the BIT_AND_EXPR.
1326 Restrict it to GIMPLE to avoid endless recursions. */
1327 && (bitop != BIT_AND_EXPR || GIMPLE)
5609420f
RB
1328 && (/* That's a good idea if the conversion widens the operand, thus
1329 after hoisting the conversion the operation will be narrower. */
1330 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
1331 /* It's also a good idea if the conversion is to a non-integer
1332 mode. */
1333 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
1334 /* Or if the precision of TO is not the same as the precision
1335 of its mode. */
2be65d9e 1336 || !type_has_mode_precision_p (type)))
5609420f
RB
1337 (convert (bitop @0 (convert @1))))))
1338
b14a9c57
RB
1339(for bitop (bit_and bit_ior)
1340 rbitop (bit_ior bit_and)
1341 /* (x | y) & x -> x */
1342 /* (x & y) | x -> x */
1343 (simplify
1344 (bitop:c (rbitop:c @0 @1) @0)
1345 @0)
1346 /* (~x | y) & x -> x & y */
1347 /* (~x & y) | x -> x | y */
1348 (simplify
1349 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1350 (bitop @0 @1)))
1351
5609420f
RB
1352/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1353(simplify
1354 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1355 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
1356
1357/* Combine successive equal operations with constants. */
1358(for bitop (bit_and bit_ior bit_xor)
1359 (simplify
1360 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
fba05d9e
RS
1361 (if (!CONSTANT_CLASS_P (@0))
1362 /* This is the canonical form regardless of whether (bitop @1 @2) can be
1363 folded to a constant. */
1364 (bitop @0 (bitop @1 @2))
1365 /* In this case we have three constants and (bitop @0 @1) doesn't fold
1366 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
1367 the values involved are such that the operation can't be decided at
1368 compile time. Try folding one of @0 or @1 with @2 to see whether
1369 that combination can be decided at compile time.
1370
1371 Keep the existing form if both folds fail, to avoid endless
1372 oscillation. */
1373 (with { tree cst1 = const_binop (bitop, type, @0, @2); }
1374 (if (cst1)
1375 (bitop @1 { cst1; })
1376 (with { tree cst2 = const_binop (bitop, type, @1, @2); }
1377 (if (cst2)
1378 (bitop @0 { cst2; }))))))))
5609420f
RB
1379
1380/* Try simple folding for X op !X, and X op X with the help
1381 of the truth_valued_p and logical_inverted_value predicates. */
1382(match truth_valued_p
1383 @0
1384 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
f84e7fd6 1385(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
5609420f
RB
1386 (match truth_valued_p
1387 (op @0 @1)))
1388(match truth_valued_p
1389 (truth_not @0))
1390
0a8f32b8
RB
1391(match (logical_inverted_value @0)
1392 (truth_not @0))
5609420f
RB
1393(match (logical_inverted_value @0)
1394 (bit_not truth_valued_p@0))
1395(match (logical_inverted_value @0)
09240451 1396 (eq @0 integer_zerop))
5609420f 1397(match (logical_inverted_value @0)
09240451 1398 (ne truth_valued_p@0 integer_truep))
5609420f 1399(match (logical_inverted_value @0)
09240451 1400 (bit_xor truth_valued_p@0 integer_truep))
5609420f
RB
1401
1402/* X & !X -> 0. */
1403(simplify
1404 (bit_and:c @0 (logical_inverted_value @0))
1405 { build_zero_cst (type); })
1406/* X | !X and X ^ !X -> 1, , if X is truth-valued. */
1407(for op (bit_ior bit_xor)
1408 (simplify
1409 (op:c truth_valued_p@0 (logical_inverted_value @0))
f84e7fd6 1410 { constant_boolean_node (true, type); }))
59c20dc7
RB
1411/* X ==/!= !X is false/true. */
1412(for op (eq ne)
1413 (simplify
1414 (op:c truth_valued_p@0 (logical_inverted_value @0))
1415 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
5609420f 1416
5609420f
RB
1417/* ~~x -> x */
1418(simplify
1419 (bit_not (bit_not @0))
1420 @0)
1421
b14a9c57
RB
1422/* Convert ~ (-A) to A - 1. */
1423(simplify
1424 (bit_not (convert? (negate @0)))
ece46666
MG
1425 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1426 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
8b5ee871 1427 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
b14a9c57 1428
81bd903a
MG
1429/* Convert - (~A) to A + 1. */
1430(simplify
1431 (negate (nop_convert (bit_not @0)))
1432 (plus (view_convert @0) { build_each_one_cst (type); }))
1433
b14a9c57
RB
1434/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1435(simplify
8b5ee871 1436 (bit_not (convert? (minus @0 integer_each_onep)))
ece46666
MG
1437 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1438 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
1439 (convert (negate @0))))
1440(simplify
1441 (bit_not (convert? (plus @0 integer_all_onesp)))
ece46666
MG
1442 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1443 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
1444 (convert (negate @0))))
1445
1446/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
1447(simplify
1448 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1449 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1450 (convert (bit_xor @0 (bit_not @1)))))
1451(simplify
1452 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1453 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1454 (convert (bit_xor @0 @1))))
1455
e268a77b
MG
1456/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
1457(simplify
1458 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
1459 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1460 (bit_not (bit_xor (view_convert @0) @1))))
1461
f52baa7b
MP
1462/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1463(simplify
44fc0a51
RB
1464 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1465 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
f52baa7b 1466
f7b7b0aa
MP
1467/* Fold A - (A & B) into ~B & A. */
1468(simplify
2eef1fc1 1469 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
f7b7b0aa
MP
1470 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1471 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1472 (convert (bit_and (bit_not @1) @0))))
5609420f 1473
2071f8f9
N
1474/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
1475(for cmp (gt lt ge le)
1476(simplify
1477 (mult (convert (cmp @0 @1)) @2)
1478 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))
1479
e36c1cfe
N
1480/* For integral types with undefined overflow and C != 0 fold
1481 x * C EQ/NE y * C into x EQ/NE y. */
1482(for cmp (eq ne)
1483 (simplify
1484 (cmp (mult:c @0 @1) (mult:c @2 @1))
1485 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1486 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1487 && tree_expr_nonzero_p (@1))
1488 (cmp @0 @2))))
1489
42bd89ce
MG
1490/* For integral types with wrapping overflow and C odd fold
1491 x * C EQ/NE y * C into x EQ/NE y. */
1492(for cmp (eq ne)
1493 (simplify
1494 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1495 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1496 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1497 && (TREE_INT_CST_LOW (@1) & 1) != 0)
1498 (cmp @0 @2))))
1499
e36c1cfe
N
1500/* For integral types with undefined overflow and C != 0 fold
1501 x * C RELOP y * C into:
84ff66b8 1502
e36c1cfe
N
1503 x RELOP y for nonnegative C
1504 y RELOP x for negative C */
1505(for cmp (lt gt le ge)
1506 (simplify
1507 (cmp (mult:c @0 @1) (mult:c @2 @1))
1508 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1509 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1510 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1511 (cmp @0 @2)
1512 (if (TREE_CODE (@1) == INTEGER_CST
8e6cdc90 1513 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
e36c1cfe 1514 (cmp @2 @0))))))
84ff66b8 1515
564e405c
JJ
1516/* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
1517(for cmp (le gt)
1518 icmp (gt le)
1519 (simplify
1520 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1521 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1522 && TYPE_UNSIGNED (TREE_TYPE (@0))
1523 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
8e6cdc90
RS
1524 && (wi::to_wide (@2)
1525 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
564e405c
JJ
1526 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1527 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
1528
a8492d5e
MG
1529/* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1530(for cmp (simple_comparison)
1531 (simplify
9adfa8e2
MG
1532 (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2)))
1533 (if (element_precision (@3) >= element_precision (@0)
1534 && types_match (@0, @1))
9cf60d3b 1535 (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
9adfa8e2
MG
1536 (if (!TYPE_UNSIGNED (TREE_TYPE (@3)))
1537 (cmp @1 @0)
1538 (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1))
1539 (with
1540 {
1541 tree utype = unsigned_type_for (TREE_TYPE (@0));
1542 }
1543 (cmp (convert:utype @1) (convert:utype @0)))))
1544 (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2))))
1545 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3)))
1546 (cmp @0 @1)
1547 (with
1548 {
1549 tree utype = unsigned_type_for (TREE_TYPE (@0));
1550 }
1551 (cmp (convert:utype @0) (convert:utype @1)))))))))
a8492d5e 1552
8d1628eb
JJ
1553/* X / C1 op C2 into a simple range test. */
1554(for cmp (simple_comparison)
1555 (simplify
1556 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1557 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1558 && integer_nonzerop (@1)
1559 && !TREE_OVERFLOW (@1)
1560 && !TREE_OVERFLOW (@2))
1561 (with { tree lo, hi; bool neg_overflow;
1562 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1563 &neg_overflow); }
1564 (switch
1565 (if (code == LT_EXPR || code == GE_EXPR)
1566 (if (TREE_OVERFLOW (lo))
1567 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1568 (if (code == LT_EXPR)
1569 (lt @0 { lo; })
1570 (ge @0 { lo; }))))
1571 (if (code == LE_EXPR || code == GT_EXPR)
1572 (if (TREE_OVERFLOW (hi))
1573 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1574 (if (code == LE_EXPR)
1575 (le @0 { hi; })
1576 (gt @0 { hi; }))))
1577 (if (!lo && !hi)
1578 { build_int_cst (type, code == NE_EXPR); })
1579 (if (code == EQ_EXPR && !hi)
1580 (ge @0 { lo; }))
1581 (if (code == EQ_EXPR && !lo)
1582 (le @0 { hi; }))
1583 (if (code == NE_EXPR && !hi)
1584 (lt @0 { lo; }))
1585 (if (code == NE_EXPR && !lo)
1586 (gt @0 { hi; }))
1587 (if (GENERIC)
1588 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1589 lo, hi); })
1590 (with
1591 {
1592 tree etype = range_check_type (TREE_TYPE (@0));
1593 if (etype)
1594 {
8d1628eb
JJ
1595 hi = fold_convert (etype, hi);
1596 lo = fold_convert (etype, lo);
1597 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1598 }
1599 }
1600 (if (etype && hi && !TREE_OVERFLOW (hi))
1601 (if (code == EQ_EXPR)
1602 (le (minus (convert:etype @0) { lo; }) { hi; })
1603 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
1604
d35256b6
MG
1605/* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1606(for op (lt le ge gt)
1607 (simplify
1608 (op (plus:c @0 @2) (plus:c @1 @2))
1609 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1610 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1611 (op @0 @1))))
1612/* For equality and subtraction, this is also true with wrapping overflow. */
1613(for op (eq ne minus)
1614 (simplify
1615 (op (plus:c @0 @2) (plus:c @1 @2))
1616 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1617 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1618 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1619 (op @0 @1))))
1620
1621/* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1622(for op (lt le ge gt)
1623 (simplify
1624 (op (minus @0 @2) (minus @1 @2))
1625 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1626 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1627 (op @0 @1))))
1628/* For equality and subtraction, this is also true with wrapping overflow. */
1629(for op (eq ne minus)
1630 (simplify
1631 (op (minus @0 @2) (minus @1 @2))
1632 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1633 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1634 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1635 (op @0 @1))))
1af4ebf5
MG
1636/* And for pointers... */
1637(for op (simple_comparison)
1638 (simplify
1639 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1640 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1641 (op @0 @1))))
1642(simplify
1643 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1644 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1645 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1646 (pointer_diff @0 @1)))
d35256b6
MG
1647
1648/* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1649(for op (lt le ge gt)
1650 (simplify
1651 (op (minus @2 @0) (minus @2 @1))
1652 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1653 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1654 (op @1 @0))))
1655/* For equality and subtraction, this is also true with wrapping overflow. */
1656(for op (eq ne minus)
1657 (simplify
1658 (op (minus @2 @0) (minus @2 @1))
1659 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1660 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1661 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1662 (op @1 @0))))
1af4ebf5
MG
1663/* And for pointers... */
1664(for op (simple_comparison)
1665 (simplify
1666 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1667 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1668 (op @1 @0))))
1669(simplify
1670 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1671 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1672 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1673 (pointer_diff @1 @0)))
d35256b6 1674
6358a676
MG
1675/* X + Y < Y is the same as X < 0 when there is no overflow. */
1676(for op (lt le gt ge)
1677 (simplify
1678 (op:c (plus:c@2 @0 @1) @1)
1679 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1680 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
cbd42900 1681 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
6358a676
MG
1682 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1683 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1684/* For equality, this is also true with wrapping overflow. */
1685(for op (eq ne)
1686 (simplify
1687 (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1688 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1689 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1690 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1691 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1692 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1693 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1694 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1695 (simplify
1696 (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1697 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1698 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1699 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1700 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1701
1702/* X - Y < X is the same as Y > 0 when there is no overflow.
1703 For equality, this is also true with wrapping overflow. */
1704(for op (simple_comparison)
1705 (simplify
1706 (op:c @0 (minus@2 @0 @1))
1707 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1708 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1709 || ((op == EQ_EXPR || op == NE_EXPR)
1710 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1711 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1712 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1713
/* Transform:
   (X / Y) == 0 -> X < Y if X, Y are unsigned.
   (X / Y) != 0 -> X >= Y, if X, Y are unsigned.  */
(for cmp (eq ne)
     ocmp (lt ge)
 (simplify
  (cmp (trunc_div @0 @1) integer_zerop)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       /* Complex ==/!= is allowed, but not </>=.  */
       && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
       && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
   (ocmp @0 @1))))

/* X == C - X can never be true if C is odd.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
  (if (TREE_INT_CST_LOW (@1) & 1)
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* Arguments on which one can call get_nonzero_bits to get the bits
   possibly set.  */
(match with_possible_nonzero_bits
 INTEGER_CST@0)
(match with_possible_nonzero_bits
 SSA_NAME@0
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
/* Slightly extended version, do not make it recursive to keep it cheap.  */
(match (with_possible_nonzero_bits2 @0)
 with_possible_nonzero_bits@0)
(match (with_possible_nonzero_bits2 @0)
 (bit_and:c with_possible_nonzero_bits@0 @2))

/* Same for bits that are known to be set, but we do not have
   an equivalent to get_nonzero_bits yet.  */
(match (with_certain_nonzero_bits2 @0)
 INTEGER_CST@0)
(match (with_certain_nonzero_bits2 @0)
 (bit_ior @1 INTEGER_CST@0))

/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
  (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* ((X inner_op C0) outer_op C1)
   With X being a tree where value_range has reasoned certain bits to always be
   zero throughout its computed value range,
   inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
   where zero_mask has 1's for all bits that are sure to be 0 in
   and 0's otherwise.
   if (inner_op == '^') C0 &= ~C1;
   if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)
   if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)
*/
(for inner_op (bit_ior bit_xor)
     outer_op (bit_xor bit_ior)
(simplify
 (outer_op
  (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
 (with
  {
    bool fail = false;
    wide_int zero_mask_not;
    wide_int C0;
    wide_int cst_emit;

    /* Only SSA names have value-range derived nonzero-bit info.  */
    if (TREE_CODE (@2) == SSA_NAME)
      zero_mask_not = get_nonzero_bits (@2);
    else
      fail = true;

    if (inner_op == BIT_XOR_EXPR)
      {
        C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
        cst_emit = C0 | wi::to_wide (@1);
      }
    else
      {
        C0 = wi::to_wide (@0);
        cst_emit = C0 ^ wi::to_wide (@1);
      }
  }
  (if (!fail && (C0 & zero_mask_not) == 0)
   (outer_op @2 { wide_int_to_tree (type, cst_emit); })
   (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
    (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))

/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
(simplify
 (pointer_plus (pointer_plus:s @0 @1) @3)
 (pointer_plus @0 (plus @1 @3)))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
 /* Conditionally look through a sign-changing conversion.  */
 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
      && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
          || (GENERIC && type == TREE_TYPE (@1))))
  @1))
(simplify
 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
  (convert @1)))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler and easier to analyze with respect to alignment
     ... = ptr & ~algn;  */
(simplify
 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
  (bit_and @0 { algn; })))

/* Try folding difference of addresses.  */
(simplify
 (minus (convert ADDR_EXPR@0) (convert @1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (minus (convert @0) (convert ADDR_EXPR@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))

/* If arg0 is derived from the address of an object or function, we may
   be able to fold this expression using the object or function's
   alignment.  */
(simplify
 (bit_and (convert? @0) INTEGER_CST@1)
 (if (POINTER_TYPE_P (TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with
   {
     unsigned int align;
     unsigned HOST_WIDE_INT bitpos;
     get_pointer_alignment_1 (@0, &align, &bitpos);
   }
   /* Only fold when the mask selects bits fully determined by the
      known alignment.  */
   (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
    { wide_int_to_tree (type, (wi::to_wide (@1)
                               & (bitpos / BITS_PER_UNIT))); }))))

/* An INTEGER_CST equal to the minimum value of its type.  */
(match min_value
 INTEGER_CST
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (t), wi::min_value (type)))))

/* An INTEGER_CST equal to the maximum value of its type.  */
(match max_value
 INTEGER_CST
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (t), wi::max_value (type)))))

/* x > y && x != XXX_MIN --> x > y
   x > y && x == XXX_MIN --> false .  */
(for eqne (eq ne)
 (simplify
  (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value))
  (switch
   (if (eqne == EQ_EXPR)
    { constant_boolean_node (false, type); })
   (if (eqne == NE_EXPR)
    @2)
   )))

/* x < y && x != XXX_MAX --> x < y
   x < y && x == XXX_MAX --> false.  */
(for eqne (eq ne)
 (simplify
  (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value))
  (switch
   (if (eqne == EQ_EXPR)
    { constant_boolean_node (false, type); })
   (if (eqne == NE_EXPR)
    @2)
   )))

/* x <= y && x == XXX_MIN --> x == XXX_MIN.  */
(simplify
 (bit_and:c (le:c @0 @1) (eq@2 @0 min_value))
 @2)

/* x >= y && x == XXX_MAX --> x == XXX_MAX.  */
(simplify
 (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value))
 @2)

/* x > y || x != XXX_MIN --> x != XXX_MIN.  */
(simplify
 (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value))
 @2)

/* x <= y || x != XXX_MIN --> true.  */
(simplify
 (bit_ior:c (le:c @0 @1) (ne @0 min_value))
 { constant_boolean_node (true, type); })

/* x <= y || x == XXX_MIN --> x <= y.  */
(simplify
 (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value))
 @2)

/* x < y || x != XXX_MAX --> x != XXX_MAX.  */
(simplify
 (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value))
 @2)

/* x >= y || x != XXX_MAX --> true
   x >= y || x == XXX_MAX --> x >= y.  */
(for eqne (eq ne)
 (simplify
  (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value))
  (switch
   (if (eqne == EQ_EXPR)
    @2)
   (if (eqne == NE_EXPR)
    { constant_boolean_node (true, type); }))))

/* Convert (X == CST1) && (X OP2 CST2) to a known value
   based on CST1 OP2 CST2.  Similarly for (X != CST1).  */

(for code1 (eq ne)
 (for code2 (eq ne lt gt le ge)
  (simplify
   (bit_and:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
   (with
    {
      int cmp = tree_int_cst_compare (@1, @2);
      /* val records whether CST1 satisfies "CST1 code2 CST2".  */
      bool val;
      switch (code2)
        {
        case EQ_EXPR: val = (cmp == 0); break;
        case NE_EXPR: val = (cmp != 0); break;
        case LT_EXPR: val = (cmp < 0); break;
        case GT_EXPR: val = (cmp > 0); break;
        case LE_EXPR: val = (cmp <= 0); break;
        case GE_EXPR: val = (cmp >= 0); break;
        default: gcc_unreachable ();
        }
    }
    (switch
     (if (code1 == EQ_EXPR && val) @3)
     (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); })
     (if (code1 == NE_EXPR && !val) @4))))))

/* Convert (X OP1 CST1) && (X OP2 CST2).  */

(for code1 (lt le gt ge)
 (for code2 (lt le gt ge)
  (simplify
   (bit_and (code1:c@3 @0 INTEGER_CST@1) (code2:c@4 @0 INTEGER_CST@2))
   (with
    {
      int cmp = tree_int_cst_compare (@1, @2);
    }
    (switch
     /* Choose the more restrictive of two < or <= comparisons.  */
     (if ((code1 == LT_EXPR || code1 == LE_EXPR)
          && (code2 == LT_EXPR || code2 == LE_EXPR))
      (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
       @3
       @4))
     /* Likewise chose the more restrictive of two > or >= comparisons.  */
     (if ((code1 == GT_EXPR || code1 == GE_EXPR)
          && (code2 == GT_EXPR || code2 == GE_EXPR))
      (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
       @3
       @4))
     /* Check for singleton ranges.  */
     (if (cmp == 0
          && ((code1 == LE_EXPR && code2 == GE_EXPR)
              || (code1 == GE_EXPR && code2 == LE_EXPR)))
      (eq @0 @1))
     /* Check for disjoint ranges.  */
     (if (cmp <= 0
          && (code1 == LT_EXPR || code1 == LE_EXPR)
          && (code2 == GT_EXPR || code2 == GE_EXPR))
      { constant_boolean_node (false, type); })
     (if (cmp >= 0
          && (code1 == GT_EXPR || code1 == GE_EXPR)
          && (code2 == LT_EXPR || code2 == LE_EXPR))
      { constant_boolean_node (false, type); })
     )))))

/* Convert (X == CST1) || (X OP2 CST2) to a known value
   based on CST1 OP2 CST2.  Similarly for (X != CST1).  */

(for code1 (eq ne)
 (for code2 (eq ne lt gt le ge)
  (simplify
   (bit_ior:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
   (with
    {
      int cmp = tree_int_cst_compare (@1, @2);
      /* val records whether CST1 satisfies "CST1 code2 CST2".  */
      bool val;
      switch (code2)
        {
        case EQ_EXPR: val = (cmp == 0); break;
        case NE_EXPR: val = (cmp != 0); break;
        case LT_EXPR: val = (cmp < 0); break;
        case GT_EXPR: val = (cmp > 0); break;
        case LE_EXPR: val = (cmp <= 0); break;
        case GE_EXPR: val = (cmp >= 0); break;
        default: gcc_unreachable ();
        }
    }
    (switch
     (if (code1 == EQ_EXPR && val) @4)
     (if (code1 == NE_EXPR && val) { constant_boolean_node (true, type); })
     (if (code1 == NE_EXPR && !val) @3))))))

/* Convert (X OP1 CST1) || (X OP2 CST2).  */

(for code1 (lt le gt ge)
 (for code2 (lt le gt ge)
  (simplify
   (bit_ior (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
   (with
    {
      int cmp = tree_int_cst_compare (@1, @2);
    }
    (switch
     /* Choose the more restrictive of two < or <= comparisons.  */
     (if ((code1 == LT_EXPR || code1 == LE_EXPR)
          && (code2 == LT_EXPR || code2 == LE_EXPR))
      (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
       @4
       @3))
     /* Likewise chose the more restrictive of two > or >= comparisons.  */
     (if ((code1 == GT_EXPR || code1 == GE_EXPR)
          && (code2 == GT_EXPR || code2 == GE_EXPR))
      (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
       @4
       @3))
     /* Check for singleton ranges.  */
     (if (cmp == 0
          && ((code1 == LT_EXPR && code2 == GT_EXPR)
              || (code1 == GT_EXPR && code2 == LT_EXPR)))
      (ne @0 @2))
     /* Check for disjoint ranges.  */
     (if (cmp >= 0
          && (code1 == LT_EXPR || code1 == LE_EXPR)
          && (code2 == GT_EXPR || code2 == GE_EXPR))
      { constant_boolean_node (true, type); })
     (if (cmp <= 0
          && (code1 == GT_EXPR || code1 == GE_EXPR)
          && (code2 == LT_EXPR || code2 == LE_EXPR))
      { constant_boolean_node (true, type); })
     )))))

/* We can't reassociate at all for saturating types.  */
(if (!TYPE_SATURATING (type))

 /* Contract negates.  */
 /* A + (-B) -> A - B */
 (simplify
  (plus:c @0 (convert? (negate @1)))
  /* Apply STRIP_NOPS on the negate.  */
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (with
    {
      /* Perform the arithmetic in a wrapping type when the overflow
	 behavior of TYPE and the operand type differ.  */
      tree t1 = type;
      if (INTEGRAL_TYPE_P (type)
	  && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
	t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
    }
    (convert (minus (convert:t1 @0) (convert:t1 @1))))))
 /* A - (-B) -> A + B */
 (simplify
  (minus @0 (convert? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (with
    {
      tree t1 = type;
      if (INTEGRAL_TYPE_P (type)
	  && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
	t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
    }
    (convert (plus (convert:t1 @0) (convert:t1 @1))))))
 /* -(T)(-A) -> (T)A
    Sign-extension is ok except for INT_MIN, which thankfully cannot
    happen without overflow.  */
 (simplify
  (negate (convert (negate @1)))
  (if (INTEGRAL_TYPE_P (type)
       && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
	   || (!TYPE_UNSIGNED (TREE_TYPE (@1))
	       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
       && !TYPE_OVERFLOW_SANITIZED (type)
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
   (convert @1)))
 (simplify
  (negate (convert negate_expr_p@1))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ((DECIMAL_FLOAT_TYPE_P (type)
	    == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
	    && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
	   || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
   (convert (negate @1))))
 (simplify
  (negate (nop_convert (negate @1)))
  (if (!TYPE_OVERFLOW_SANITIZED (type)
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
   (view_convert @1)))

7318e44f
RB
2150 /* We can't reassociate floating-point unless -fassociative-math
2151 or fixed-point plus or minus because of saturation to +-Inf. */
2152 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
2153 && !FIXED_POINT_TYPE_P (type))
cc7b5acf
RB
2154
2155 /* Match patterns that allow contracting a plus-minus pair
2156 irrespective of overflow issues. */
2157 /* (A +- B) - A -> +- B */
2158 /* (A +- B) -+ B -> A */
2159 /* A - (A +- B) -> -+ B */
2160 /* A +- (B -+ A) -> +- B */
2161 (simplify
2162 (minus (plus:c @0 @1) @0)
2163 @1)
2164 (simplify
2165 (minus (minus @0 @1) @0)
2166 (negate @1))
2167 (simplify
2168 (plus:c (minus @0 @1) @1)
2169 @0)
2170 (simplify
2171 (minus @0 (plus:c @0 @1))
2172 (negate @1))
2173 (simplify
2174 (minus @0 (minus @0 @1))
2175 @1)
1e7df2e6
MG
2176 /* (A +- B) + (C - A) -> C +- B */
2177 /* (A + B) - (A - C) -> B + C */
2178 /* More cases are handled with comparisons. */
2179 (simplify
2180 (plus:c (plus:c @0 @1) (minus @2 @0))
2181 (plus @2 @1))
2182 (simplify
2183 (plus:c (minus @0 @1) (minus @2 @0))
2184 (minus @2 @1))
1af4ebf5
MG
2185 (simplify
2186 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
2187 (if (TYPE_OVERFLOW_UNDEFINED (type)
2188 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
2189 (pointer_diff @2 @1)))
1e7df2e6
MG
2190 (simplify
2191 (minus (plus:c @0 @1) (minus @0 @2))
2192 (plus @1 @2))
cc7b5acf 2193
ed73f46f
MG
2194 /* (A +- CST1) +- CST2 -> A + CST3
2195 Use view_convert because it is safe for vectors and equivalent for
2196 scalars. */
cc7b5acf
RB
2197 (for outer_op (plus minus)
2198 (for inner_op (plus minus)
ed73f46f 2199 neg_inner_op (minus plus)
cc7b5acf 2200 (simplify
ed73f46f
MG
2201 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
2202 CONSTANT_CLASS_P@2)
2203 /* If one of the types wraps, use that one. */
2204 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
3eb1eecf
JJ
2205 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2206 forever if something doesn't simplify into a constant. */
2207 (if (!CONSTANT_CLASS_P (@0))
2208 (if (outer_op == PLUS_EXPR)
2209 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
2210 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
ed73f46f
MG
2211 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2212 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2213 (if (outer_op == PLUS_EXPR)
2214 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
2215 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
2216 /* If the constant operation overflows we cannot do the transform
2217 directly as we would introduce undefined overflow, for example
2218 with (a - 1) + INT_MIN. */
2219 (if (types_match (type, @0))
2220 (with { tree cst = const_binop (outer_op == inner_op
2221 ? PLUS_EXPR : MINUS_EXPR,
2222 type, @1, @2); }
2223 (if (cst && !TREE_OVERFLOW (cst))
2224 (inner_op @0 { cst; } )
2225 /* X+INT_MAX+1 is X-INT_MIN. */
2226 (if (INTEGRAL_TYPE_P (type) && cst
8e6cdc90
RS
2227 && wi::to_wide (cst) == wi::min_value (type))
2228 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
ed73f46f
MG
2229 /* Last resort, use some unsigned type. */
2230 (with { tree utype = unsigned_type_for (type); }
48fcd201
JJ
2231 (if (utype)
2232 (view_convert (inner_op
2233 (view_convert:utype @0)
2234 (view_convert:utype
2235 { drop_tree_overflow (cst); }))))))))))))))
cc7b5acf 2236
b302f2e0 2237 /* (CST1 - A) +- CST2 -> CST3 - A */
cc7b5acf
RB
2238 (for outer_op (plus minus)
2239 (simplify
2240 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
23f27839 2241 (with { tree cst = const_binop (outer_op, type, @1, @2); }
cc7b5acf
RB
2242 (if (cst && !TREE_OVERFLOW (cst))
2243 (minus { cst; } @0)))))
2244
b302f2e0
RB
2245 /* CST1 - (CST2 - A) -> CST3 + A */
2246 (simplify
2247 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
2248 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
2249 (if (cst && !TREE_OVERFLOW (cst))
2250 (plus { cst; } @0))))
2251
/* ((T)(A)) + CST -> (T)(A + CST)  */
#if GIMPLE
  (simplify
   (plus (convert SSA_NAME@0) INTEGER_CST@1)
   (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
	&& TREE_CODE (type) == INTEGER_TYPE
	&& TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
	&& int_fits_type_p (@1, TREE_TYPE (@0)))
    /* Perform binary operation inside the cast if the constant fits
       and (A + CST)'s range does not overflow.  */
    (with
     {
       wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
			 max_ovf = wi::OVF_OVERFLOW;
       tree inner_type = TREE_TYPE (@0);

       wide_int w1 = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
				     TYPE_SIGN (inner_type));

       wide_int wmin0, wmax0;
       if (get_range_info (@0, &wmin0, &wmax0) == VR_RANGE)
	 {
	   wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
	   wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
	 }
     }
    (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
     (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
    )))
#endif

cc7b5acf
RB
2283 /* ~A + A -> -1 */
2284 (simplify
2285 (plus:c (bit_not @0) @0)
2286 (if (!TYPE_OVERFLOW_TRAPS (type))
2287 { build_all_ones_cst (type); }))
2288
2289 /* ~A + 1 -> -A */
2290 (simplify
e19740ae
RB
2291 (plus (convert? (bit_not @0)) integer_each_onep)
2292 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2293 (negate (convert @0))))
2294
2295 /* -A - 1 -> ~A */
2296 (simplify
2297 (minus (convert? (negate @0)) integer_each_onep)
2298 (if (!TYPE_OVERFLOW_TRAPS (type)
2299 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
2300 (bit_not (convert @0))))
2301
2302 /* -1 - A -> ~A */
2303 (simplify
2304 (minus integer_all_onesp @0)
bc4315fb 2305 (bit_not @0))
cc7b5acf
RB
2306
2307 /* (T)(P + A) - (T)P -> (T) A */
d7f44d4d 2308 (simplify
a72610d4
JJ
2309 (minus (convert (plus:c @@0 @1))
2310 (convert? @0))
d7f44d4d
JJ
2311 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2312 /* For integer types, if A has a smaller type
2313 than T the result depends on the possible
2314 overflow in P + A.
2315 E.g. T=size_t, A=(unsigned)429497295, P>0.
2316 However, if an overflow in P + A would cause
2317 undefined behavior, we can assume that there
2318 is no overflow. */
a72610d4
JJ
2319 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2320 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
d7f44d4d
JJ
2321 (convert @1)))
2322 (simplify
2323 (minus (convert (pointer_plus @@0 @1))
2324 (convert @0))
2325 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2326 /* For pointer types, if the conversion of A to the
2327 final type requires a sign- or zero-extension,
2328 then we have to punt - it is not defined which
2329 one is correct. */
2330 || (POINTER_TYPE_P (TREE_TYPE (@0))
2331 && TREE_CODE (@1) == INTEGER_CST
2332 && tree_int_cst_sign_bit (@1) == 0))
2333 (convert @1)))
1af4ebf5
MG
2334 (simplify
2335 (pointer_diff (pointer_plus @@0 @1) @0)
2336 /* The second argument of pointer_plus must be interpreted as signed, and
2337 thus sign-extended if necessary. */
2338 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
8ae43881
JJ
2339 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2340 second arg is unsigned even when we need to consider it as signed,
2341 we don't want to diagnose overflow here. */
2342 (convert (view_convert:stype @1))))
a8fc2579
RB
2343
2344 /* (T)P - (T)(P + A) -> -(T) A */
d7f44d4d 2345 (simplify
a72610d4
JJ
2346 (minus (convert? @0)
2347 (convert (plus:c @@0 @1)))
d7f44d4d
JJ
2348 (if (INTEGRAL_TYPE_P (type)
2349 && TYPE_OVERFLOW_UNDEFINED (type)
2350 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2351 (with { tree utype = unsigned_type_for (type); }
2352 (convert (negate (convert:utype @1))))
a8fc2579
RB
2353 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2354 /* For integer types, if A has a smaller type
2355 than T the result depends on the possible
2356 overflow in P + A.
2357 E.g. T=size_t, A=(unsigned)429497295, P>0.
2358 However, if an overflow in P + A would cause
2359 undefined behavior, we can assume that there
2360 is no overflow. */
a72610d4
JJ
2361 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2362 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
d7f44d4d
JJ
2363 (negate (convert @1)))))
2364 (simplify
2365 (minus (convert @0)
2366 (convert (pointer_plus @@0 @1)))
2367 (if (INTEGRAL_TYPE_P (type)
2368 && TYPE_OVERFLOW_UNDEFINED (type)
2369 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2370 (with { tree utype = unsigned_type_for (type); }
2371 (convert (negate (convert:utype @1))))
2372 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
a8fc2579
RB
2373 /* For pointer types, if the conversion of A to the
2374 final type requires a sign- or zero-extension,
2375 then we have to punt - it is not defined which
2376 one is correct. */
2377 || (POINTER_TYPE_P (TREE_TYPE (@0))
2378 && TREE_CODE (@1) == INTEGER_CST
2379 && tree_int_cst_sign_bit (@1) == 0))
2380 (negate (convert @1)))))
1af4ebf5
MG
2381 (simplify
2382 (pointer_diff @0 (pointer_plus @@0 @1))
2383 /* The second argument of pointer_plus must be interpreted as signed, and
2384 thus sign-extended if necessary. */
2385 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
8ae43881
JJ
2386 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2387 second arg is unsigned even when we need to consider it as signed,
2388 we don't want to diagnose overflow here. */
2389 (negate (convert (view_convert:stype @1)))))
a8fc2579
RB
2390
2391 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
d7f44d4d 2392 (simplify
a72610d4 2393 (minus (convert (plus:c @@0 @1))
d7f44d4d
JJ
2394 (convert (plus:c @0 @2)))
2395 (if (INTEGRAL_TYPE_P (type)
2396 && TYPE_OVERFLOW_UNDEFINED (type)
a72610d4
JJ
2397 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2398 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
d7f44d4d
JJ
2399 (with { tree utype = unsigned_type_for (type); }
2400 (convert (minus (convert:utype @1) (convert:utype @2))))
a72610d4
JJ
2401 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2402 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2403 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2404 /* For integer types, if A has a smaller type
2405 than T the result depends on the possible
2406 overflow in P + A.
2407 E.g. T=size_t, A=(unsigned)429497295, P>0.
2408 However, if an overflow in P + A would cause
2409 undefined behavior, we can assume that there
2410 is no overflow. */
2411 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2412 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2413 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2414 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
d7f44d4d
JJ
2415 (minus (convert @1) (convert @2)))))
2416 (simplify
2417 (minus (convert (pointer_plus @@0 @1))
2418 (convert (pointer_plus @0 @2)))
2419 (if (INTEGRAL_TYPE_P (type)
2420 && TYPE_OVERFLOW_UNDEFINED (type)
2421 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2422 (with { tree utype = unsigned_type_for (type); }
2423 (convert (minus (convert:utype @1) (convert:utype @2))))
2424 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
a8fc2579
RB
2425 /* For pointer types, if the conversion of A to the
2426 final type requires a sign- or zero-extension,
2427 then we have to punt - it is not defined which
2428 one is correct. */
2429 || (POINTER_TYPE_P (TREE_TYPE (@0))
2430 && TREE_CODE (@1) == INTEGER_CST
2431 && tree_int_cst_sign_bit (@1) == 0
2432 && TREE_CODE (@2) == INTEGER_CST
2433 && tree_int_cst_sign_bit (@2) == 0))
d7f44d4d 2434 (minus (convert @1) (convert @2)))))
1af4ebf5
MG
2435 (simplify
2436 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2437 /* The second argument of pointer_plus must be interpreted as signed, and
2438 thus sign-extended if necessary. */
2439 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
8ae43881
JJ
2440 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2441 second arg is unsigned even when we need to consider it as signed,
2442 we don't want to diagnose overflow here. */
2443 (minus (convert (view_convert:stype @1))
2444 (convert (view_convert:stype @2)))))))
cc7b5acf 2445
/* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
   Modeled after fold_plusminus_mult_expr.  */
(if (!TYPE_SATURATING (type)
     && (!FLOAT_TYPE_P (type) || flag_associative_math))
 (for plusminus (plus minus)
  (simplify
   (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
   (if ((!ANY_INTEGRAL_TYPE_P (type)
	 || TYPE_OVERFLOW_WRAPS (type)
	 || (INTEGRAL_TYPE_P (type)
	     && tree_expr_nonzero_p (@0)
	     && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
	/* If @1 +- @2 is constant require a hard single-use on either
	   original operand (but not on both).  */
	&& (single_use (@3) || single_use (@4)))
    (mult (plusminus @1 @2) @0)))
  /* We cannot generate constant 1 for fract.  */
  (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
   (simplify
    (plusminus @0 (mult:c@3 @0 @2))
    (if ((!ANY_INTEGRAL_TYPE_P (type)
	  || TYPE_OVERFLOW_WRAPS (type)
	  || (INTEGRAL_TYPE_P (type)
	      && tree_expr_nonzero_p (@0)
	      && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
	 && single_use (@3))
     (mult (plusminus { build_one_cst (type); } @2) @0)))
   (simplify
    (plusminus (mult:c@3 @0 @2) @0)
    (if ((!ANY_INTEGRAL_TYPE_P (type)
	  || TYPE_OVERFLOW_WRAPS (type)
	  || (INTEGRAL_TYPE_P (type)
	      && tree_expr_nonzero_p (@0)
	      && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
	 && single_use (@3))
     (mult (plusminus @2 { build_one_cst (type); }) @0))))))

/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax().  */

/* minmax (x, x) -> x for all four operators.  */
(for minmax (min max FMIN_ALL FMAX_ALL)
 (simplify
  (minmax @0 @0)
  @0))
/* min(max(x,y),y) -> y.  */
(simplify
 (min:c (max:c @0 @1) @1)
 @1)
/* max(min(x,y),y) -> y.  */
(simplify
 (max:c (min:c @0 @1) @1)
 @1)
/* max(a,-a) -> abs(a).  */
(simplify
 (max:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
	  || TYPE_OVERFLOW_UNDEFINED (type)))
  (abs @0)))
/* min(a,-a) -> -abs(a).  */
(simplify
 (min:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
	  || TYPE_OVERFLOW_UNDEFINED (type)))
  (negate (abs @0))))
/* min/max against the type's extreme values folds to a single operand.  */
(simplify
 (min @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @0)))
(simplify
 (max @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @0)))

/* max (a, a + CST) -> a + CST where CST is positive.  */
/* max (a, a + CST) -> a where CST is negative.  */
(simplify
 (max:c @0 (plus@2 @0 INTEGER_CST@1))
  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_int_cst_sgn (@1) > 0)
    @2
    @0)))

/* min (a, a + CST) -> a where CST is positive.  */
/* min (a, a + CST) -> a + CST where CST is negative.  */
(simplify
 (min:c @0 (plus@2 @0 INTEGER_CST@1))
  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_int_cst_sgn (@1) > 0)
    @0
    @2)))

/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
   and the outer convert demotes the expression back to x's type.  */
(for minmax (min max)
 (simplify
  (convert (minmax@0 (convert @1) INTEGER_CST@2))
  (if (INTEGRAL_TYPE_P (type)
       && types_match (@1, type) && int_fits_type_p (@2, type)
       && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
       && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
   (minmax @1 (convert @2)))))

(for minmax (FMIN_ALL FMAX_ALL)
 /* If either argument is NaN, return the other one.  Avoid the
    transformation if we get (and honor) a signalling NaN.  */
 (simplify
  (minmax:c @0 REAL_CST@1)
  (if (real_isnan (TREE_REAL_CST_PTR (@1))
       && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
   @0)))
/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR.  C99 requires these
   functions to return the numeric arg if the other one is NaN.
   MIN and MAX don't honor that, so only transform if -ffinite-math-only
   is set.  C99 doesn't require -0.0 to be handled, so we don't have to
   worry about it either.  */
(if (flag_finite_math_only)
 (simplify
  (FMIN_ALL @0 @1)
  (min @0 @1))
 (simplify
  (FMAX_ALL @0 @1)
  (max @0 @1)))
ce0e66ff 2583/* min (-A, -B) -> -max (A, B) */
c6cfa2bf
MM
2584(for minmax (min max FMIN_ALL FMAX_ALL)
2585 maxmin (max min FMAX_ALL FMIN_ALL)
ce0e66ff
MG
2586 (simplify
2587 (minmax (negate:s@2 @0) (negate:s@3 @1))
2588 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2589 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2590 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2591 (negate (maxmin @0 @1)))))
2592/* MIN (~X, ~Y) -> ~MAX (X, Y)
2593 MAX (~X, ~Y) -> ~MIN (X, Y) */
2594(for minmax (min max)
2595 maxmin (max min)
2596 (simplify
2597 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2598 (bit_not (maxmin @0 @1))))
a7f24614 2599
b4817bd6
MG
2600/* MIN (X, Y) == X -> X <= Y */
2601(for minmax (min min max max)
2602 cmp (eq ne eq ne )
2603 out (le gt ge lt )
2604 (simplify
2605 (cmp:c (minmax:c @0 @1) @0)
2606 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2607 (out @0 @1))))
2608/* MIN (X, 5) == 0 -> X == 0
2609 MIN (X, 5) == 7 -> false */
2610(for cmp (eq ne)
2611 (simplify
2612 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90
RS
2613 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2614 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6 2615 { constant_boolean_node (cmp == NE_EXPR, type); }
8e6cdc90
RS
2616 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2617 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6
MG
2618 (cmp @0 @2)))))
2619(for cmp (eq ne)
2620 (simplify
2621 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90
RS
2622 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2623 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6 2624 { constant_boolean_node (cmp == NE_EXPR, type); }
8e6cdc90
RS
2625 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2626 TYPE_SIGN (TREE_TYPE (@0))))
b4817bd6
MG
2627 (cmp @0 @2)))))
2628/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2629(for minmax (min min max max min min max max )
2630 cmp (lt le gt ge gt ge lt le )
2631 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2632 (simplify
2633 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2634 (comb (cmp @0 @2) (cmp @1 @2))))
2635
a7f24614
RB
2636/* Simplifications of shift and rotates. */
2637
2638(for rotate (lrotate rrotate)
2639 (simplify
2640 (rotate integer_all_onesp@0 @1)
2641 @0))
2642
2643/* Optimize -1 >> x for arithmetic right shifts. */
2644(simplify
2645 (rshift integer_all_onesp@0 @1)
2646 (if (!TYPE_UNSIGNED (type)
2647 && tree_expr_nonnegative_p (@1))
2648 @0))
2649
12085390
N
2650/* Optimize (x >> c) << c into x & (-1<<c). */
2651(simplify
2652 (lshift (rshift @0 INTEGER_CST@1) @1)
8e6cdc90 2653 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
12085390
N
2654 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
2655
2656/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2657 types. */
2658(simplify
2659 (rshift (lshift @0 INTEGER_CST@1) @1)
2660 (if (TYPE_UNSIGNED (type)
8e6cdc90 2661 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
12085390
N
2662 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
2663
a7f24614
RB
2664(for shiftrotate (lrotate rrotate lshift rshift)
2665 (simplify
2666 (shiftrotate @0 integer_zerop)
2667 (non_lvalue @0))
2668 (simplify
2669 (shiftrotate integer_zerop@0 @1)
2670 @0)
2671 /* Prefer vector1 << scalar to vector1 << vector2
2672 if vector2 is uniform. */
2673 (for vec (VECTOR_CST CONSTRUCTOR)
2674 (simplify
2675 (shiftrotate @0 vec@1)
2676 (with { tree tem = uniform_vector_p (@1); }
2677 (if (tem)
2678 (shiftrotate @0 { tem; }))))))
2679
165ba2e9
JJ
2680/* Simplify X << Y where Y's low width bits are 0 to X, as only valid
2681 Y is 0. Similarly for X >> Y. */
2682#if GIMPLE
2683(for shift (lshift rshift)
2684 (simplify
2685 (shift @0 SSA_NAME@1)
2686 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2687 (with {
2688 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2689 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2690 }
2691 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2692 @0)))))
2693#endif
2694
a7f24614
RB
2695/* Rewrite an LROTATE_EXPR by a constant into an
2696 RROTATE_EXPR by a new constant. */
2697(simplify
2698 (lrotate @0 INTEGER_CST@1)
23f27839 2699 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
a7f24614
RB
2700 build_int_cst (TREE_TYPE (@1),
2701 element_precision (type)), @1); }))
2702
14ea9f92
RB
2703/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2704(for op (lrotate rrotate rshift lshift)
2705 (simplify
2706 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2707 (with { unsigned int prec = element_precision (type); }
8e6cdc90
RS
2708 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2709 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2710 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2711 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
a1488398
RS
2712 (with { unsigned int low = (tree_to_uhwi (@1)
2713 + tree_to_uhwi (@2)); }
14ea9f92
RB
2714 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2715 being well defined. */
2716 (if (low >= prec)
2717 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
8fdc6c67 2718 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
50301115 2719 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
8fdc6c67
RB
2720 { build_zero_cst (type); }
2721 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2722 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
14ea9f92
RB
2723
2724
01ada710
MP
2725/* ((1 << A) & 1) != 0 -> A == 0
2726 ((1 << A) & 1) == 0 -> A != 0 */
2727(for cmp (ne eq)
2728 icmp (eq ne)
2729 (simplify
2730 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2731 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
cc7b5acf 2732
f2e609c3
MP
2733/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2734 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2735 if CST2 != 0. */
2736(for cmp (ne eq)
2737 (simplify
2738 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
8e6cdc90 2739 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
f2e609c3
MP
2740 (if (cand < 0
2741 || (!integer_zerop (@2)
8e6cdc90 2742 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
8fdc6c67
RB
2743 { constant_boolean_node (cmp == NE_EXPR, type); }
2744 (if (!integer_zerop (@2)
8e6cdc90 2745 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
8fdc6c67 2746 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
f2e609c3 2747
1ffbaa3f
RB
2748/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2749 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2750 if the new mask might be further optimized. */
2751(for shift (lshift rshift)
2752 (simplify
44fc0a51
RB
2753 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2754 INTEGER_CST@2)
1ffbaa3f
RB
2755 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2756 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2757 && tree_fits_uhwi_p (@1)
2758 && tree_to_uhwi (@1) > 0
2759 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2760 (with
2761 {
2762 unsigned int shiftc = tree_to_uhwi (@1);
2763 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2764 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2765 tree shift_type = TREE_TYPE (@3);
2766 unsigned int prec;
2767
2768 if (shift == LSHIFT_EXPR)
fecfbfa4 2769 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
1ffbaa3f 2770 else if (shift == RSHIFT_EXPR
2be65d9e 2771 && type_has_mode_precision_p (shift_type))
1ffbaa3f
RB
2772 {
2773 prec = TYPE_PRECISION (TREE_TYPE (@3));
2774 tree arg00 = @0;
2775 /* See if more bits can be proven as zero because of
2776 zero extension. */
2777 if (@3 != @0
2778 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2779 {
2780 tree inner_type = TREE_TYPE (@0);
2be65d9e 2781 if (type_has_mode_precision_p (inner_type)
1ffbaa3f
RB
2782 && TYPE_PRECISION (inner_type) < prec)
2783 {
2784 prec = TYPE_PRECISION (inner_type);
2785 /* See if we can shorten the right shift. */
2786 if (shiftc < prec)
2787 shift_type = inner_type;
2788 /* Otherwise X >> C1 is all zeros, so we'll optimize
2789 it into (X, 0) later on by making sure zerobits
2790 is all ones. */
2791 }
2792 }
dd4786fe 2793 zerobits = HOST_WIDE_INT_M1U;
1ffbaa3f
RB
2794 if (shiftc < prec)
2795 {
2796 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2797 zerobits <<= prec - shiftc;
2798 }
2799 /* For arithmetic shift if sign bit could be set, zerobits
2800 can contain actually sign bits, so no transformation is
2801 possible, unless MASK masks them all away. In that
2802 case the shift needs to be converted into logical shift. */
2803 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2804 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2805 {
2806 if ((mask & zerobits) == 0)
2807 shift_type = unsigned_type_for (TREE_TYPE (@3));
2808 else
2809 zerobits = 0;
2810 }
2811 }
2812 }
2813 /* ((X << 16) & 0xff00) is (X, 0). */
2814 (if ((mask & zerobits) == mask)
8fdc6c67
RB
2815 { build_int_cst (type, 0); }
2816 (with { newmask = mask | zerobits; }
2817 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2818 (with
2819 {
2820 /* Only do the transformation if NEWMASK is some integer
2821 mode's mask. */
2822 for (prec = BITS_PER_UNIT;
2823 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
fecfbfa4 2824 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
8fdc6c67
RB
2825 break;
2826 }
2827 (if (prec < HOST_BITS_PER_WIDE_INT
dd4786fe 2828 || newmask == HOST_WIDE_INT_M1U)
8fdc6c67
RB
2829 (with
2830 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2831 (if (!tree_int_cst_equal (newmaskt, @2))
2832 (if (shift_type != TREE_TYPE (@3))
2833 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2834 (bit_and @4 { newmaskt; })))))))))))))
1ffbaa3f 2835
84ff66b8
AV
2836/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2837 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
98e30e51 2838(for shift (lshift rshift)
84ff66b8
AV
2839 (for bit_op (bit_and bit_xor bit_ior)
2840 (simplify
2841 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2842 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2843 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2844 (bit_op (shift (convert @0) @1) { mask; }))))))
98e30e51 2845
ad1d92ab
MM
2846/* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2847(simplify
2848 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2849 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
ece46666
MG
2850 && (element_precision (TREE_TYPE (@0))
2851 <= element_precision (TREE_TYPE (@1))
2852 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
ad1d92ab
MM
2853 (with
2854 { tree shift_type = TREE_TYPE (@0); }
2855 (convert (rshift (convert:shift_type @1) @2)))))
2856
2857/* ~(~X >>r Y) -> X >>r Y
2858 ~(~X <<r Y) -> X <<r Y */
2859(for rotate (lrotate rrotate)
2860 (simplify
2861 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
ece46666
MG
2862 (if ((element_precision (TREE_TYPE (@0))
2863 <= element_precision (TREE_TYPE (@1))
2864 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2865 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2866 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
ad1d92ab
MM
2867 (with
2868 { tree rotate_type = TREE_TYPE (@0); }
2869 (convert (rotate (convert:rotate_type @1) @2))))))
98e30e51 2870
d4573ffe
RB
2871/* Simplifications of conversions. */
2872
2873/* Basic strip-useless-type-conversions / strip_nops. */
f3582e54 2874(for cvt (convert view_convert float fix_trunc)
d4573ffe
RB
2875 (simplify
2876 (cvt @0)
2877 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2878 || (GENERIC && type == TREE_TYPE (@0)))
2879 @0)))
2880
2881/* Contract view-conversions. */
2882(simplify
2883 (view_convert (view_convert @0))
2884 (view_convert @0))
2885
2886/* For integral conversions with the same precision or pointer
2887 conversions use a NOP_EXPR instead. */
2888(simplify
2889 (view_convert @0)
2890 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2891 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2892 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2893 (convert @0)))
2894
bce8ef71
MG
2895/* Strip inner integral conversions that do not change precision or size, or
2896 zero-extend while keeping the same size (for bool-to-char). */
d4573ffe
RB
2897(simplify
2898 (view_convert (convert@0 @1))
2899 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2900 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
bce8ef71
MG
2901 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2902 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2903 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2904 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
d4573ffe
RB
2905 (view_convert @1)))
2906
f469220d
RB
2907/* Simplify a view-converted empty constructor. */
2908(simplify
2909 (view_convert CONSTRUCTOR@0)
2910 (if (TREE_CODE (@0) != SSA_NAME
2911 && CONSTRUCTOR_NELTS (@0) == 0)
2912 { build_zero_cst (type); }))
2913
d4573ffe
RB
2914/* Re-association barriers around constants and other re-association
2915 barriers can be removed. */
2916(simplify
2917 (paren CONSTANT_CLASS_P@0)
2918 @0)
2919(simplify
2920 (paren (paren@1 @0))
2921 @1)
1e51d0a2
RB
2922
2923/* Handle cases of two conversions in a row. */
2924(for ocvt (convert float fix_trunc)
2925 (for icvt (convert float)
2926 (simplify
2927 (ocvt (icvt@1 @0))
2928 (with
2929 {
2930 tree inside_type = TREE_TYPE (@0);
2931 tree inter_type = TREE_TYPE (@1);
2932 int inside_int = INTEGRAL_TYPE_P (inside_type);
2933 int inside_ptr = POINTER_TYPE_P (inside_type);
2934 int inside_float = FLOAT_TYPE_P (inside_type);
09240451 2935 int inside_vec = VECTOR_TYPE_P (inside_type);
1e51d0a2
RB
2936 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2937 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2938 int inter_int = INTEGRAL_TYPE_P (inter_type);
2939 int inter_ptr = POINTER_TYPE_P (inter_type);
2940 int inter_float = FLOAT_TYPE_P (inter_type);
09240451 2941 int inter_vec = VECTOR_TYPE_P (inter_type);
1e51d0a2
RB
2942 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2943 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2944 int final_int = INTEGRAL_TYPE_P (type);
2945 int final_ptr = POINTER_TYPE_P (type);
2946 int final_float = FLOAT_TYPE_P (type);
09240451 2947 int final_vec = VECTOR_TYPE_P (type);
1e51d0a2
RB
2948 unsigned int final_prec = TYPE_PRECISION (type);
2949 int final_unsignedp = TYPE_UNSIGNED (type);
2950 }
64d3a1f0
RB
2951 (switch
2952 /* In addition to the cases of two conversions in a row
2953 handled below, if we are converting something to its own
2954 type via an object of identical or wider precision, neither
2955 conversion is needed. */
2956 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2957 || (GENERIC
2958 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2959 && (((inter_int || inter_ptr) && final_int)
2960 || (inter_float && final_float))
2961 && inter_prec >= final_prec)
2962 (ocvt @0))
2963
2964 /* Likewise, if the intermediate and initial types are either both
2965 float or both integer, we don't need the middle conversion if the
2966 former is wider than the latter and doesn't change the signedness
2967 (for integers). Avoid this if the final type is a pointer since
36088299 2968 then we sometimes need the middle conversion. */
64d3a1f0
RB
2969 (if (((inter_int && inside_int) || (inter_float && inside_float))
2970 && (final_int || final_float)
2971 && inter_prec >= inside_prec
36088299 2972 && (inter_float || inter_unsignedp == inside_unsignedp))
64d3a1f0
RB
2973 (ocvt @0))
2974
2975 /* If we have a sign-extension of a zero-extended value, we can
2976 replace that by a single zero-extension. Likewise if the
2977 final conversion does not change precision we can drop the
2978 intermediate conversion. */
2979 (if (inside_int && inter_int && final_int
2980 && ((inside_prec < inter_prec && inter_prec < final_prec
2981 && inside_unsignedp && !inter_unsignedp)
2982 || final_prec == inter_prec))
2983 (ocvt @0))
2984
2985 /* Two conversions in a row are not needed unless:
1e51d0a2
RB
2986 - some conversion is floating-point (overstrict for now), or
2987 - some conversion is a vector (overstrict for now), or
2988 - the intermediate type is narrower than both initial and
2989 final, or
2990 - the intermediate type and innermost type differ in signedness,
2991 and the outermost type is wider than the intermediate, or
2992 - the initial type is a pointer type and the precisions of the
2993 intermediate and final types differ, or
2994 - the final type is a pointer type and the precisions of the
2995 initial and intermediate types differ. */
64d3a1f0
RB
2996 (if (! inside_float && ! inter_float && ! final_float
2997 && ! inside_vec && ! inter_vec && ! final_vec
2998 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2999 && ! (inside_int && inter_int
3000 && inter_unsignedp != inside_unsignedp
3001 && inter_prec < final_prec)
3002 && ((inter_unsignedp && inter_prec > inside_prec)
3003 == (final_unsignedp && final_prec > inter_prec))
3004 && ! (inside_ptr && inter_prec != final_prec)
36088299 3005 && ! (final_ptr && inside_prec != inter_prec))
64d3a1f0
RB
3006 (ocvt @0))
3007
3008 /* A truncation to an unsigned type (a zero-extension) should be
3009 canonicalized as bitwise and of a mask. */
1d510e04
JJ
3010 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
3011 && final_int && inter_int && inside_int
64d3a1f0
RB
3012 && final_prec == inside_prec
3013 && final_prec > inter_prec
3014 && inter_unsignedp)
3015 (convert (bit_and @0 { wide_int_to_tree
3016 (inside_type,
3017 wi::mask (inter_prec, false,
3018 TYPE_PRECISION (inside_type))); })))
3019
3020 /* If we are converting an integer to a floating-point that can
3021 represent it exactly and back to an integer, we can skip the
3022 floating-point conversion. */
3023 (if (GIMPLE /* PR66211 */
3024 && inside_int && inter_float && final_int &&
3025 (unsigned) significand_size (TYPE_MODE (inter_type))
3026 >= inside_prec - !inside_unsignedp)
3027 (convert @0)))))))
ea2042ba
RB
3028
3029/* If we have a narrowing conversion to an integral type that is fed by a
3030 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
3031 masks off bits outside the final type (and nothing else). */
3032(simplify
3033 (convert (bit_and @0 INTEGER_CST@1))
3034 (if (INTEGRAL_TYPE_P (type)
3035 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3036 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
3037 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
3038 TYPE_PRECISION (type)), 0))
3039 (convert @0)))
a25454ea
RB
3040
3041
3042/* (X /[ex] A) * A -> X. */
3043(simplify
2eef1fc1
RB
3044 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
3045 (convert @0))
eaeba53a 3046
839d0860
RB
3047/* Simplify (A / B) * B + (A % B) -> A. */
3048(for div (trunc_div ceil_div floor_div round_div)
3049 mod (trunc_mod ceil_mod floor_mod round_mod)
3050 (simplify
3051 (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
3052 @0))
3053
0036218b
MG
3054/* ((X /[ex] A) +- B) * A --> X +- A * B. */
3055(for op (plus minus)
3056 (simplify
3057 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
3058 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
3059 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
3060 (with
3061 {
3062 wi::overflow_type overflow;
3063 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
3064 TYPE_SIGN (type), &overflow);
3065 }
3066 (if (types_match (type, TREE_TYPE (@2))
3067 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
3068 (op @0 { wide_int_to_tree (type, mul); })
3069 (with { tree utype = unsigned_type_for (type); }
3070 (convert (op (convert:utype @0)
3071 (mult (convert:utype @1) (convert:utype @2))))))))))
3072
a7f24614
RB
3073/* Canonicalization of binary operations. */
3074
3075/* Convert X + -C into X - C. */
3076(simplify
3077 (plus @0 REAL_CST@1)
3078 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
23f27839 3079 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
a7f24614
RB
3080 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
3081 (minus @0 { tem; })))))
3082
6b6aa8d3 3083/* Convert x+x into x*2. */
a7f24614
RB
3084(simplify
3085 (plus @0 @0)
3086 (if (SCALAR_FLOAT_TYPE_P (type))
6b6aa8d3
MG
3087 (mult @0 { build_real (type, dconst2); })
3088 (if (INTEGRAL_TYPE_P (type))
3089 (mult @0 { build_int_cst (type, 2); }))))
a7f24614 3090
406520e2 3091/* 0 - X -> -X. */
a7f24614
RB
3092(simplify
3093 (minus integer_zerop @1)
3094 (negate @1))
406520e2
MG
3095(simplify
3096 (pointer_diff integer_zerop @1)
3097 (negate (convert @1)))
a7f24614
RB
3098
3099/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
3100 ARG0 is zero and X + ARG0 reduces to X, since that would mean
3101 (-ARG1 + ARG0) reduces to -ARG1. */
3102(simplify
3103 (minus real_zerop@0 @1)
3104 (if (fold_real_zero_addition_p (type, @0, 0))
3105 (negate @1)))
3106
3107/* Transform x * -1 into -x. */
3108(simplify
3109 (mult @0 integer_minus_onep)
3110 (negate @0))
eaeba53a 3111
b771c609
AM
3112/* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
3113 signed overflow for CST != 0 && CST != -1. */
3114(simplify
b46ebc6c 3115 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
b771c609 3116 (if (TREE_CODE (@2) != INTEGER_CST
b46ebc6c 3117 && single_use (@3)
b771c609
AM
3118 && !integer_zerop (@1) && !integer_minus_onep (@1))
3119 (mult (mult @0 @2) @1)))
3120
96285749
RS
3121/* True if we can easily extract the real and imaginary parts of a complex
3122 number. */
3123(match compositional_complex
3124 (convert? (complex @0 @1)))
3125
eaeba53a
RB
3126/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
3127(simplify
3128 (complex (realpart @0) (imagpart @0))
3129 @0)
3130(simplify
3131 (realpart (complex @0 @1))
3132 @0)
3133(simplify
3134 (imagpart (complex @0 @1))
3135 @1)
83633539 3136
77c028c5
MG
3137/* Sometimes we only care about half of a complex expression. */
3138(simplify
3139 (realpart (convert?:s (conj:s @0)))
3140 (convert (realpart @0)))
3141(simplify
3142 (imagpart (convert?:s (conj:s @0)))
3143 (convert (negate (imagpart @0))))
3144(for part (realpart imagpart)
3145 (for op (plus minus)
3146 (simplify
3147 (part (convert?:s@2 (op:s @0 @1)))
3148 (convert (op (part @0) (part @1))))))
3149(simplify
3150 (realpart (convert?:s (CEXPI:s @0)))
3151 (convert (COS @0)))
3152(simplify
3153 (imagpart (convert?:s (CEXPI:s @0)))
3154 (convert (SIN @0)))
3155
3156/* conj(conj(x)) -> x */
3157(simplify
3158 (conj (convert? (conj @0)))
3159 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
3160 (convert @0)))
3161
3162/* conj({x,y}) -> {x,-y} */
3163(simplify
3164 (conj (convert?:s (complex:s @0 @1)))
3165 (with { tree itype = TREE_TYPE (type); }
3166 (complex (convert:itype @0) (negate (convert:itype @1)))))
83633539
RB
3167
3168/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
3169(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
3170 (simplify
3171 (bswap (bswap @0))
3172 @0)
3173 (simplify
3174 (bswap (bit_not (bswap @0)))
3175 (bit_not @0))
3176 (for bitop (bit_xor bit_ior bit_and)
3177 (simplify
3178 (bswap (bitop:c (bswap @0) @1))
3179 (bitop @0 (bswap @1)))))
96994de0
RB
3180
3181
3182/* Combine COND_EXPRs and VEC_COND_EXPRs. */
3183
3184/* Simplify constant conditions.
3185 Only optimize constant conditions when the selected branch
3186 has the same type as the COND_EXPR. This avoids optimizing
3187 away "c ? x : throw", where the throw has a void type.
3188 Note that we cannot throw away the fold-const.c variant nor
3189 this one as we depend on doing this transform before possibly
3190 A ? B : B -> B triggers and the fold-const.c one can optimize
3191 0 ? A : B to B even if A has side-effects. Something
3192 genmatch cannot handle. */
3193(simplify
3194 (cond INTEGER_CST@0 @1 @2)
8fdc6c67
RB
3195 (if (integer_zerop (@0))
3196 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
3197 @2)
3198 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
3199 @1)))
96994de0
RB
3200(simplify
3201 (vec_cond VECTOR_CST@0 @1 @2)
3202 (if (integer_all_onesp (@0))
8fdc6c67
RB
3203 @1
3204 (if (integer_zerop (@0))
3205 @2)))
96994de0 3206
34a13a52
MG
3207/* Sink unary operations to constant branches, but only if we do fold it to
3208 constants. */
3209(for op (negate bit_not abs absu)
3210 (simplify
3211 (op (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2))
3212 (with
3213 {
3214 tree cst1, cst2;
3215 cst1 = const_unop (op, type, @1);
3216 if (cst1)
3217 cst2 = const_unop (op, type, @2);
3218 }
3219 (if (cst1 && cst2)
3220 (vec_cond @0 { cst1; } { cst2; })))))
3221
b5481987
BC
3222/* Simplification moved from fold_cond_expr_with_comparison. It may also
3223 be extended. */
e2535011
BC
3224/* This pattern implements two kinds simplification:
3225
3226 Case 1)
3227 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
b5481987
BC
3228 1) Conversions are type widening from smaller type.
3229 2) Const c1 equals to c2 after canonicalizing comparison.
3230 3) Comparison has tree code LT, LE, GT or GE.
3231 This specific pattern is needed when (cmp (convert x) c) may not
3232 be simplified by comparison patterns because of multiple uses of
3233 x. It also makes sense here because simplifying across multiple
e2535011
BC
3234 referred var is always benefitial for complicated cases.
3235
3236 Case 2)
3237 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
3238(for cmp (lt le gt ge eq)
b5481987 3239 (simplify
ae22bc5d 3240 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
b5481987
BC
3241 (with
3242 {
3243 tree from_type = TREE_TYPE (@1);
3244 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
ae22bc5d 3245 enum tree_code code = ERROR_MARK;
b5481987 3246
ae22bc5d
BC
3247 if (INTEGRAL_TYPE_P (from_type)
3248 && int_fits_type_p (@2, from_type)
b5481987
BC
3249 && (types_match (c1_type, from_type)
3250 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
3251 && (TYPE_UNSIGNED (from_type)
3252 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
3253 && (types_match (c2_type, from_type)
3254 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
3255 && (TYPE_UNSIGNED (from_type)
3256 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
3257 {
ae22bc5d 3258 if (cmp != EQ_EXPR)
b5481987 3259 {
e2535011
BC
3260 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
3261 {
3262 /* X <= Y - 1 equals to X < Y. */
ae22bc5d 3263 if (cmp == LE_EXPR)
e2535011
BC
3264 code = LT_EXPR;
3265 /* X > Y - 1 equals to X >= Y. */
ae22bc5d 3266 if (cmp == GT_EXPR)
e2535011
BC
3267 code = GE_EXPR;
3268 }
3269 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
3270 {
3271 /* X < Y + 1 equals to X <= Y. */
ae22bc5d 3272 if (cmp == LT_EXPR)
e2535011
BC
3273 code = LE_EXPR;
3274 /* X >= Y + 1 equals to X > Y. */
ae22bc5d 3275 if (cmp == GE_EXPR)
e2535011
BC
3276 code = GT_EXPR;
3277 }
ae22bc5d
BC
3278 if (code != ERROR_MARK
3279 || wi::to_widest (@2) == wi::to_widest (@3))
e2535011 3280 {
ae22bc5d 3281 if (cmp == LT_EXPR || cmp == LE_EXPR)
e2535011 3282 code = MIN_EXPR;
ae22bc5d 3283 if (cmp == GT_EXPR || cmp == GE_EXPR)
e2535011
BC
3284 code = MAX_EXPR;
3285 }
b5481987 3286 }
e2535011 3287 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
ae22bc5d
BC
3288 else if (int_fits_type_p (@3, from_type))
3289 code = EQ_EXPR;
b5481987
BC
3290 }
3291 }
3292 (if (code == MAX_EXPR)
21aaaf1e 3293 (convert (max @1 (convert @2)))
b5481987 3294 (if (code == MIN_EXPR)
21aaaf1e 3295 (convert (min @1 (convert @2)))
e2535011 3296 (if (code == EQ_EXPR)
ae22bc5d 3297 (convert (cond (eq @1 (convert @3))
21aaaf1e 3298 (convert:from_type @3) (convert:from_type @2)))))))))
b5481987 3299
714445ae
BC
3300/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
3301
3302 1) OP is PLUS or MINUS.
3303 2) CMP is LT, LE, GT or GE.
3304 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
3305
3306 This pattern also handles special cases like:
3307
3308 A) Operand x is a unsigned to signed type conversion and c1 is
3309 integer zero. In this case,
3310 (signed type)x < 0 <=> x > MAX_VAL(signed type)
3311 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
3312 B) Const c1 may not equal to (C3 op' C2). In this case we also
3313 check equality for (c1+1) and (c1-1) by adjusting comparison
3314 code.
3315
3316 TODO: Though signed type is handled by this pattern, it cannot be
3317 simplified at the moment because C standard requires additional
3318 type promotion. In order to match&simplify it here, the IR needs
3319 to be cleaned up by other optimizers, i.e, VRP. */
3320(for op (plus minus)
3321 (for cmp (lt le gt ge)
3322 (simplify
3323 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
3324 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
3325 (if (types_match (from_type, to_type)
3326 /* Check if it is special case A). */
3327 || (TYPE_UNSIGNED (from_type)
3328 && !TYPE_UNSIGNED (to_type)
3329 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
3330 && integer_zerop (@1)
3331 && (cmp == LT_EXPR || cmp == GE_EXPR)))
3332 (with
3333 {
4a669ac3 3334 wi::overflow_type overflow = wi::OVF_NONE;
714445ae 3335 enum tree_code code, cmp_code = cmp;
8e6cdc90
RS
3336 wide_int real_c1;
3337 wide_int c1 = wi::to_wide (@1);
3338 wide_int c2 = wi::to_wide (@2);
3339 wide_int c3 = wi::to_wide (@3);
714445ae
BC
3340 signop sgn = TYPE_SIGN (from_type);
3341
3342 /* Handle special case A), given x of unsigned type:
3343 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
3344 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
3345 if (!types_match (from_type, to_type))
3346 {
3347 if (cmp_code == LT_EXPR)
3348 cmp_code = GT_EXPR;
3349 if (cmp_code == GE_EXPR)
3350 cmp_code = LE_EXPR;
3351 c1 = wi::max_value (to_type);
3352 }
3353 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
3354 compute (c3 op' c2) and check if it equals to c1 with op' being
3355 the inverted operator of op. Make sure overflow doesn't happen
3356 if it is undefined. */
3357 if (op == PLUS_EXPR)
3358 real_c1 = wi::sub (c3, c2, sgn, &overflow);
3359 else
3360 real_c1 = wi::add (c3, c2, sgn, &overflow);
3361
3362 code = cmp_code;
3363 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3364 {
3365 /* Check if c1 equals to real_c1. Boundary condition is handled
3366 by adjusting comparison operation if necessary. */
3367 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3368 && !overflow)
3369 {
3370 /* X <= Y - 1 equals to X < Y. */
3371 if (cmp_code == LE_EXPR)
3372 code = LT_EXPR;
3373 /* X > Y - 1 equals to X >= Y. */
3374 if (cmp_code == GT_EXPR)
3375 code = GE_EXPR;
3376 }
3377 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3378 && !overflow)
3379 {
3380 /* X < Y + 1 equals to X <= Y. */
3381 if (cmp_code == LT_EXPR)
3382 code = LE_EXPR;
3383 /* X >= Y + 1 equals to X > Y. */
3384 if (cmp_code == GE_EXPR)
3385 code = GT_EXPR;
3386 }
3387 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3388 {
3389 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3390 code = MIN_EXPR;
3391 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3392 code = MAX_EXPR;
3393 }
3394 }
3395 }
3396 (if (code == MAX_EXPR)
3397 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3398 { wide_int_to_tree (from_type, c2); })
3399 (if (code == MIN_EXPR)
3400 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3401 { wide_int_to_tree (from_type, c2); })))))))))
3402
96994de0
RB
3403(for cnd (cond vec_cond)
3404 /* A ? B : (A ? X : C) -> A ? B : C. */
3405 (simplify
3406 (cnd @0 (cnd @0 @1 @2) @3)
3407 (cnd @0 @1 @3))
3408 (simplify
3409 (cnd @0 @1 (cnd @0 @2 @3))
3410 (cnd @0 @1 @3))
24a179f8
RB
3411 /* A ? B : (!A ? C : X) -> A ? B : C. */
3412 /* ??? This matches embedded conditions open-coded because genmatch
3413 would generate matching code for conditions in separate stmts only.
3414 The following is still important to merge then and else arm cases
3415 from if-conversion. */
3416 (simplify
3417 (cnd @0 @1 (cnd @2 @3 @4))
2c58d42c 3418 (if (inverse_conditions_p (@0, @2))
24a179f8
RB
3419 (cnd @0 @1 @3)))
3420 (simplify
3421 (cnd @0 (cnd @1 @2 @3) @4)
2c58d42c 3422 (if (inverse_conditions_p (@0, @1))
24a179f8 3423 (cnd @0 @3 @4)))
96994de0
RB
3424
3425 /* A ? B : B -> B. */
3426 (simplify
3427 (cnd @0 @1 @1)
09240451 3428 @1)
96994de0 3429
09240451
MG
3430 /* !A ? B : C -> A ? C : B. */
3431 (simplify
3432 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3433 (cnd @0 @2 @1)))
f84e7fd6 3434
a3ca1bc5
RB
3435/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3436 return all -1 or all 0 results. */
f43d102e
RS
3437/* ??? We could instead convert all instances of the vec_cond to negate,
3438 but that isn't necessarily a win on its own. */
3439(simplify
a3ca1bc5 3440 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 3441 (if (VECTOR_TYPE_P (type)
928686b1
RS
3442 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3443 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
f43d102e 3444 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 3445 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 3446 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f43d102e 3447
a3ca1bc5 3448/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
f43d102e 3449(simplify
a3ca1bc5 3450 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 3451 (if (VECTOR_TYPE_P (type)
928686b1
RS
3452 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3453 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
f43d102e 3454 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 3455 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 3456 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f84e7fd6 3457
2ee05f1e 3458
f84e7fd6
RB
3459/* Simplifications of comparisons. */
3460
24f1db9c
RB
3461/* See if we can reduce the magnitude of a constant involved in a
3462 comparison by changing the comparison code. This is a canonicalization
3463 formerly done by maybe_canonicalize_comparison_1. */
3464(for cmp (le gt)
3465 acmp (lt ge)
3466 (simplify
f06e47d7
JJ
3467 (cmp @0 uniform_integer_cst_p@1)
3468 (with { tree cst = uniform_integer_cst_p (@1); }
3469 (if (tree_int_cst_sgn (cst) == -1)
3470 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3471 wide_int_to_tree (TREE_TYPE (cst),
3472 wi::to_wide (cst)
3473 + 1)); })))))
24f1db9c
RB
3474(for cmp (ge lt)
3475 acmp (gt le)
3476 (simplify
f06e47d7
JJ
3477 (cmp @0 uniform_integer_cst_p@1)
3478 (with { tree cst = uniform_integer_cst_p (@1); }
3479 (if (tree_int_cst_sgn (cst) == 1)
3480 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3481 wide_int_to_tree (TREE_TYPE (cst),
3482 wi::to_wide (cst) - 1)); })))))
24f1db9c 3483
f84e7fd6
RB
3484/* We can simplify a logical negation of a comparison to the
3485 inverted comparison. As we cannot compute an expression
3486 operator using invert_tree_comparison we have to simulate
3487 that with expression code iteration. */
3488(for cmp (tcc_comparison)
3489 icmp (inverted_tcc_comparison)
3490 ncmp (inverted_tcc_comparison_with_nans)
3491 /* Ideally we'd like to combine the following two patterns
3492 and handle some more cases by using
3493 (logical_inverted_value (cmp @0 @1))
3494 here but for that genmatch would need to "inline" that.
3495 For now implement what forward_propagate_comparison did. */
3496 (simplify
3497 (bit_not (cmp @0 @1))
3498 (if (VECTOR_TYPE_P (type)
3499 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3500 /* Comparison inversion may be impossible for trapping math,
3501 invert_tree_comparison will tell us. But we can't use
3502 a computed operator in the replacement tree thus we have
3503 to play the trick below. */
3504 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 3505 (cmp, HONOR_NANS (@0)); }
f84e7fd6 3506 (if (ic == icmp)
8fdc6c67
RB
3507 (icmp @0 @1)
3508 (if (ic == ncmp)
3509 (ncmp @0 @1))))))
f84e7fd6 3510 (simplify
09240451
MG
3511 (bit_xor (cmp @0 @1) integer_truep)
3512 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 3513 (cmp, HONOR_NANS (@0)); }
09240451 3514 (if (ic == icmp)
8fdc6c67
RB
3515 (icmp @0 @1)
3516 (if (ic == ncmp)
3517 (ncmp @0 @1))))))
e18c1d66 3518
2ee05f1e
RB
3519/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3520 ??? The transformation is valid for the other operators if overflow
3521 is undefined for the type, but performing it here badly interacts
3522 with the transformation in fold_cond_expr_with_comparison which
3523 attempts to synthetize ABS_EXPR. */
3524(for cmp (eq ne)
1af4ebf5
MG
3525 (for sub (minus pointer_diff)
3526 (simplify
3527 (cmp (sub@2 @0 @1) integer_zerop)
3528 (if (single_use (@2))
3529 (cmp @0 @1)))))
2ee05f1e
RB
3530
3531/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3532 signed arithmetic case. That form is created by the compiler
3533 often enough for folding it to be of value. One example is in
3534 computing loop trip counts after Operator Strength Reduction. */
07cdc2b8
RB
3535(for cmp (simple_comparison)
3536 scmp (swapped_simple_comparison)
2ee05f1e 3537 (simplify
bc6e9db4 3538 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2ee05f1e
RB
3539 /* Handle unfolded multiplication by zero. */
3540 (if (integer_zerop (@1))
8fdc6c67
RB
3541 (cmp @1 @2)
3542 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
bc6e9db4
RB
3543 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3544 && single_use (@3))
8fdc6c67
RB
3545 /* If @1 is negative we swap the sense of the comparison. */
3546 (if (tree_int_cst_sgn (@1) < 0)
3547 (scmp @0 @2)
3548 (cmp @0 @2))))))
03cc70b5 3549
2ee05f1e
RB
3550/* Simplify comparison of something with itself. For IEEE
3551 floating-point, we can only do some of these simplifications. */
287f8f17 3552(for cmp (eq ge le)
2ee05f1e
RB
3553 (simplify
3554 (cmp @0 @0)
287f8f17 3555 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 3556 || ! HONOR_NANS (@0))
287f8f17
RB
3557 { constant_boolean_node (true, type); }
3558 (if (cmp != EQ_EXPR)
3559 (eq @0 @0)))))
2ee05f1e
RB
3560(for cmp (ne gt lt)
3561 (simplify
3562 (cmp @0 @0)
3563 (if (cmp != NE_EXPR
3564 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 3565 || ! HONOR_NANS (@0))
2ee05f1e 3566 { constant_boolean_node (false, type); })))
b5d3d787
RB
3567(for cmp (unle unge uneq)
3568 (simplify
3569 (cmp @0 @0)
3570 { constant_boolean_node (true, type); }))
dd53d197
MG
3571(for cmp (unlt ungt)
3572 (simplify
3573 (cmp @0 @0)
3574 (unordered @0 @0)))
b5d3d787
RB
3575(simplify
3576 (ltgt @0 @0)
3577 (if (!flag_trapping_math)
3578 { constant_boolean_node (false, type); }))
2ee05f1e
RB
3579
3580/* Fold ~X op ~Y as Y op X. */
07cdc2b8 3581(for cmp (simple_comparison)
2ee05f1e 3582 (simplify
7fe996ba
RB
3583 (cmp (bit_not@2 @0) (bit_not@3 @1))
3584 (if (single_use (@2) && single_use (@3))
3585 (cmp @1 @0))))
2ee05f1e
RB
3586
3587/* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
07cdc2b8
RB
3588(for cmp (simple_comparison)
3589 scmp (swapped_simple_comparison)
2ee05f1e 3590 (simplify
7fe996ba
RB
3591 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3592 (if (single_use (@2)
3593 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2ee05f1e
RB
3594 (scmp @0 (bit_not @1)))))
3595
07cdc2b8
RB
3596(for cmp (simple_comparison)
3597 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3598 (simplify
3599 (cmp (convert@2 @0) (convert? @1))
3600 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3601 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3602 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3603 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3604 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3605 (with
3606 {
3607 tree type1 = TREE_TYPE (@1);
3608 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3609 {
3610 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3611 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3612 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3613 type1 = float_type_node;
3614 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3615 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3616 type1 = double_type_node;
3617 }
3618 tree newtype
3619 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
03cc70b5 3620 ? TREE_TYPE (@0) : type1);
07cdc2b8
RB
3621 }
3622 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3623 (cmp (convert:newtype @0) (convert:newtype @1))))))
03cc70b5 3624
07cdc2b8
RB
3625 (simplify
3626 (cmp @0 REAL_CST@1)
3627 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
64d3a1f0
RB
3628 (switch
3629 /* a CMP (-0) -> a CMP 0 */
3630 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3631 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3632 /* x != NaN is always true, other ops are always false. */
3633 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3634 && ! HONOR_SNANS (@1))
3635 { constant_boolean_node (cmp == NE_EXPR, type); })
3636 /* Fold comparisons against infinity. */
3637 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3638 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3639 (with
3640 {
3641 REAL_VALUE_TYPE max;
3642 enum tree_code code = cmp;
3643 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3644 if (neg)
3645 code = swap_tree_comparison (code);
3646 }
3647 (switch
e96a5786 3648 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
64d3a1f0 3649 (if (code == GT_EXPR
e96a5786 3650 && !(HONOR_NANS (@0) && flag_trapping_math))
64d3a1f0
RB
3651 { constant_boolean_node (false, type); })
3652 (if (code == LE_EXPR)
e96a5786 3653 /* x <= +Inf is always true, if we don't care about NaNs. */
64d3a1f0
RB
3654 (if (! HONOR_NANS (@0))
3655 { constant_boolean_node (true, type); }
e96a5786
JM
3656 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3657 an "invalid" exception. */
3658 (if (!flag_trapping_math)
3659 (eq @0 @0))))
3660 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3661 for == this introduces an exception for x a NaN. */
3662 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3663 || code == GE_EXPR)
64d3a1f0
RB
3664 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3665 (if (neg)
3666 (lt @0 { build_real (TREE_TYPE (@0), max); })
3667 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3668 /* x < +Inf is always equal to x <= DBL_MAX. */
3669 (if (code == LT_EXPR)
3670 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3671 (if (neg)
3672 (ge @0 { build_real (TREE_TYPE (@0), max); })
3673 (le @0 { build_real (TREE_TYPE (@0), max); }))))
e96a5786
JM
3674 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3675 an exception for x a NaN so use an unordered comparison. */
64d3a1f0
RB
3676 (if (code == NE_EXPR)
3677 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3678 (if (! HONOR_NANS (@0))
3679 (if (neg)
3680 (ge @0 { build_real (TREE_TYPE (@0), max); })
3681 (le @0 { build_real (TREE_TYPE (@0), max); }))
3682 (if (neg)
e96a5786
JM
3683 (unge @0 { build_real (TREE_TYPE (@0), max); })
3684 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
07cdc2b8
RB
3685
3686 /* If this is a comparison of a real constant with a PLUS_EXPR
3687 or a MINUS_EXPR of a real constant, we can convert it into a
3688 comparison with a revised real constant as long as no overflow
3689 occurs when unsafe_math_optimizations are enabled. */
3690 (if (flag_unsafe_math_optimizations)
3691 (for op (plus minus)
3692 (simplify
3693 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3694 (with
3695 {
3696 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3697 TREE_TYPE (@1), @2, @1);
3698 }
f980c9a2 3699 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
3700 (cmp @0 { tem; }))))))
3701
3702 /* Likewise, we can simplify a comparison of a real constant with
3703 a MINUS_EXPR whose first operand is also a real constant, i.e.
3704 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3705 floating-point types only if -fassociative-math is set. */
3706 (if (flag_associative_math)
3707 (simplify
0409237b 3708 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
07cdc2b8 3709 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
f980c9a2 3710 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
3711 (cmp { tem; } @1)))))
3712
3713 /* Fold comparisons against built-in math functions. */
3714 (if (flag_unsafe_math_optimizations
3715 && ! flag_errno_math)
3716 (for sq (SQRT)
3717 (simplify
3718 (cmp (sq @0) REAL_CST@1)
64d3a1f0
RB
3719 (switch
3720 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3721 (switch
3722 /* sqrt(x) < y is always false, if y is negative. */
3723 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
8fdc6c67 3724 { constant_boolean_node (false, type); })
64d3a1f0
RB
3725 /* sqrt(x) > y is always true, if y is negative and we
3726 don't care about NaNs, i.e. negative values of x. */
3727 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3728 { constant_boolean_node (true, type); })
3729 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3730 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
c53233c6
RS
3731 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3732 (switch
3733 /* sqrt(x) < 0 is always false. */
3734 (if (cmp == LT_EXPR)
3735 { constant_boolean_node (false, type); })
3736 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3737 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3738 { constant_boolean_node (true, type); })
3739 /* sqrt(x) <= 0 -> x == 0. */
3740 (if (cmp == LE_EXPR)
3741 (eq @0 @1))
3742 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3743 == or !=. In the last case:
3744
3745 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3746
3747 if x is negative or NaN. Due to -funsafe-math-optimizations,
3748 the results for other x follow from natural arithmetic. */
3749 (cmp @0 @1)))
64d3a1f0
RB
3750 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3751 (with
3752 {
3753 REAL_VALUE_TYPE c2;
5c88ea94
RS
3754 real_arithmetic (&c2, MULT_EXPR,
3755 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
3756 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3757 }
3758 (if (REAL_VALUE_ISINF (c2))
3759 /* sqrt(x) > y is x == +Inf, when y is very large. */
3760 (if (HONOR_INFINITIES (@0))
3761 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3762 { constant_boolean_node (false, type); })
3763 /* sqrt(x) > c is the same as x > c*c. */
3764 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3765 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3766 (with
3767 {
3768 REAL_VALUE_TYPE c2;
5c88ea94
RS
3769 real_arithmetic (&c2, MULT_EXPR,
3770 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
3771 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3772 }
3773 (if (REAL_VALUE_ISINF (c2))
3774 (switch
3775 /* sqrt(x) < y is always true, when y is a very large
3776 value and we don't care about NaNs or Infinities. */
3777 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3778 { constant_boolean_node (true, type); })
3779 /* sqrt(x) < y is x != +Inf when y is very large and we
3780 don't care about NaNs. */
3781 (if (! HONOR_NANS (@0))
3782 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3783 /* sqrt(x) < y is x >= 0 when y is very large and we
3784 don't care about Infinities. */
3785 (if (! HONOR_INFINITIES (@0))
3786 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3787 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3788 (if (GENERIC)
3789 (truth_andif
3790 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3791 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3792 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3793 (if (! HONOR_NANS (@0))
3794 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3795 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3796 (if (GENERIC)
3797 (truth_andif
3798 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
0ca2e7f7
PK
3799 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3800 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3801 (simplify
3802 (cmp (sq @0) (sq @1))
3803 (if (! HONOR_NANS (@0))
3804 (cmp @0 @1))))))
2ee05f1e 3805
e41ec71b 3806/* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
f3842847
YG
3807(for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
3808 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
e41ec71b
YG
3809 (simplify
3810 (cmp (float@0 @1) (float @2))
3811 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
3812 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3813 (with
3814 {
3815 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
3816 tree type1 = TREE_TYPE (@1);
3817 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
3818 tree type2 = TREE_TYPE (@2);
3819 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
3820 }
3821 (if (fmt.can_represent_integral_type_p (type1)
3822 && fmt.can_represent_integral_type_p (type2))
f3842847
YG
3823 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
3824 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
3825 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
3826 && type1_signed_p >= type2_signed_p)
3827 (icmp @1 (convert @2))
3828 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
3829 && type1_signed_p <= type2_signed_p)
3830 (icmp (convert:type2 @1) @2)
3831 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
3832 && type1_signed_p == type2_signed_p)
3833 (icmp @1 @2))))))))))
e41ec71b 3834
c779bea5
YG
3835/* Optimize various special cases of (FTYPE) N CMP CST. */
3836(for cmp (lt le eq ne ge gt)
3837 icmp (le le eq ne ge ge)
3838 (simplify
3839 (cmp (float @0) REAL_CST@1)
3840 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3841 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3842 (with
3843 {
3844 tree itype = TREE_TYPE (@0);
c779bea5
YG
3845 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3846 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3847 /* Be careful to preserve any potential exceptions due to
3848 NaNs. qNaNs are ok in == or != context.
3849 TODO: relax under -fno-trapping-math or
3850 -fno-signaling-nans. */
3851 bool exception_p
3852 = real_isnan (cst) && (cst->signalling
c651dca2 3853 || (cmp != EQ_EXPR && cmp != NE_EXPR));
c779bea5
YG
3854 }
3855 /* TODO: allow non-fitting itype and SNaNs when
3856 -fno-trapping-math. */
e41ec71b 3857 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
c779bea5
YG
3858 (with
3859 {
e41ec71b 3860 signop isign = TYPE_SIGN (itype);
c779bea5
YG
3861 REAL_VALUE_TYPE imin, imax;
3862 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3863 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3864
3865 REAL_VALUE_TYPE icst;
3866 if (cmp == GT_EXPR || cmp == GE_EXPR)
3867 real_ceil (&icst, fmt, cst);
3868 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3869 real_floor (&icst, fmt, cst);
3870 else
3871 real_trunc (&icst, fmt, cst);
3872
b09bf97b 3873 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
c779bea5
YG
3874
3875 bool overflow_p = false;
3876 wide_int icst_val
3877 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3878 }
3879 (switch
3880 /* Optimize cases when CST is outside of ITYPE's range. */
3881 (if (real_compare (LT_EXPR, cst, &imin))
3882 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3883 type); })
3884 (if (real_compare (GT_EXPR, cst, &imax))
3885 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3886 type); })
3887 /* Remove cast if CST is an integer representable by ITYPE. */
3888 (if (cst_int_p)
3889 (cmp @0 { gcc_assert (!overflow_p);
3890 wide_int_to_tree (itype, icst_val); })
3891 )
3892 /* When CST is fractional, optimize
3893 (FTYPE) N == CST -> 0
3894 (FTYPE) N != CST -> 1. */
3895 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
03cc70b5 3896 { constant_boolean_node (cmp == NE_EXPR, type); })
c779bea5
YG
3897 /* Otherwise replace with sensible integer constant. */
3898 (with
3899 {
3900 gcc_checking_assert (!overflow_p);
3901 }
3902 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
3903
40fd269a
MG
3904/* Fold A /[ex] B CMP C to A CMP B * C. */
3905(for cmp (eq ne)
3906 (simplify
3907 (cmp (exact_div @0 @1) INTEGER_CST@2)
3908 (if (!integer_zerop (@1))
8e6cdc90 3909 (if (wi::to_wide (@2) == 0)
40fd269a
MG
3910 (cmp @0 @2)
3911 (if (TREE_CODE (@1) == INTEGER_CST)
3912 (with
3913 {
4a669ac3 3914 wi::overflow_type ovf;
8e6cdc90
RS
3915 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3916 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
40fd269a
MG
3917 }
3918 (if (ovf)
3919 { constant_boolean_node (cmp == NE_EXPR, type); }
3920 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3921(for cmp (lt le gt ge)
3922 (simplify
3923 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90 3924 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
40fd269a
MG
3925 (with
3926 {
4a669ac3 3927 wi::overflow_type ovf;
8e6cdc90
RS
3928 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3929 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
40fd269a
MG
3930 }
3931 (if (ovf)
8e6cdc90
RS
3932 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3933 TYPE_SIGN (TREE_TYPE (@2)))
40fd269a
MG
3934 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3935 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
3936
9cf60d3b
MG
3937/* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.
3938
3939 For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
3940 For large C (more than min/B+2^size), this is also true, with the
3941 multiplication computed modulo 2^size.
3942 For intermediate C, this just tests the sign of A. */
3943(for cmp (lt le gt ge)
3944 cmp2 (ge ge lt lt)
3945 (simplify
3946 (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
3947 (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
3948 && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
3949 && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3950 (with
3951 {
3952 tree utype = TREE_TYPE (@2);
3953 wide_int denom = wi::to_wide (@1);
3954 wide_int right = wi::to_wide (@2);
3955 wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
3956 wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
3957 bool small = wi::leu_p (right, smax);
3958 bool large = wi::geu_p (right, smin);
3959 }
3960 (if (small || large)
3961 (cmp (convert:utype @0) (mult @2 (convert @1)))
3962 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))
3963
cfdc4f33
MG
3964/* Unordered tests if either argument is a NaN. */
3965(simplify
3966 (bit_ior (unordered @0 @0) (unordered @1 @1))
aea417d7 3967 (if (types_match (@0, @1))
cfdc4f33 3968 (unordered @0 @1)))
257b01ba
MG
3969(simplify
3970 (bit_and (ordered @0 @0) (ordered @1 @1))
3971 (if (types_match (@0, @1))
3972 (ordered @0 @1)))
cfdc4f33
MG
3973(simplify
3974 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3975 @2)
257b01ba
MG
3976(simplify
3977 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3978 @2)
e18c1d66 3979
90c6f26c
RB
3980/* Simple range test simplifications. */
3981/* A < B || A >= B -> true. */
5d30c58d
RB
3982(for test1 (lt le le le ne ge)
3983 test2 (ge gt ge ne eq ne)
90c6f26c
RB
3984 (simplify
3985 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3986 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3987 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3988 { constant_boolean_node (true, type); })))
3989/* A < B && A >= B -> false. */
3990(for test1 (lt lt lt le ne eq)
3991 test2 (ge gt eq gt eq gt)
3992 (simplify
3993 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3994 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3995 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3996 { constant_boolean_node (false, type); })))
3997
9ebc3467
YG
3998/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3999 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
4000
4001 Note that comparisons
4002 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
4003 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
4004 will be canonicalized to above so there's no need to
4005 consider them here.
4006 */
4007
4008(for cmp (le gt)
4009 eqcmp (eq ne)
4010 (simplify
4011 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
4012 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
4013 (with
4014 {
4015 tree ty = TREE_TYPE (@0);
4016 unsigned prec = TYPE_PRECISION (ty);
4017 wide_int mask = wi::to_wide (@2, prec);
4018 wide_int rhs = wi::to_wide (@3, prec);
4019 signop sgn = TYPE_SIGN (ty);
4020 }
4021 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
4022 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
4023 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
4024 { build_zero_cst (ty); }))))))
4025
534bd33b
MG
4026/* -A CMP -B -> B CMP A. */
4027(for cmp (tcc_comparison)
4028 scmp (swapped_tcc_comparison)
4029 (simplify
4030 (cmp (negate @0) (negate @1))
4031 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
4032 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4033 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
4034 (scmp @0 @1)))
4035 (simplify
4036 (cmp (negate @0) CONSTANT_CLASS_P@1)
4037 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
4038 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4039 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
23f27839 4040 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
534bd33b
MG
4041 (if (tem && !TREE_OVERFLOW (tem))
4042 (scmp @0 { tem; }))))))
4043
b0eb889b
MG
4044/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
4045(for op (eq ne)
4046 (simplify
4047 (op (abs @0) zerop@1)
4048 (op @0 @1)))
4049
6358a676
MG
4050/* From fold_sign_changed_comparison and fold_widened_comparison.
4051 FIXME: the lack of symmetry is disturbing. */
79d4f7c6
RB
4052(for cmp (simple_comparison)
4053 (simplify
4054 (cmp (convert@0 @00) (convert?@1 @10))
452ec2a5 4055 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
79d4f7c6
RB
4056 /* Disable this optimization if we're casting a function pointer
4057 type on targets that require function pointer canonicalization. */
4058 && !(targetm.have_canonicalize_funcptr_for_compare ()
400bc526
JDA
4059 && ((POINTER_TYPE_P (TREE_TYPE (@00))
4060 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
4061 || (POINTER_TYPE_P (TREE_TYPE (@10))
4062 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
2fde61e3 4063 && single_use (@0))
79d4f7c6
RB
4064 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
4065 && (TREE_CODE (@10) == INTEGER_CST
6358a676 4066 || @1 != @10)
79d4f7c6
RB
4067 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
4068 || cmp == NE_EXPR
4069 || cmp == EQ_EXPR)
6358a676 4070 && !POINTER_TYPE_P (TREE_TYPE (@00)))
79d4f7c6
RB
4071 /* ??? The special-casing of INTEGER_CST conversion was in the original
4072 code and here to avoid a spurious overflow flag on the resulting
4073 constant which fold_convert produces. */
4074 (if (TREE_CODE (@1) == INTEGER_CST)
4075 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
4076 TREE_OVERFLOW (@1)); })
4077 (cmp @00 (convert @1)))
4078
4079 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
4080 /* If possible, express the comparison in the shorter mode. */
4081 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
7fd82d52
PP
4082 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
4083 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
4084 && TYPE_UNSIGNED (TREE_TYPE (@00))))
79d4f7c6
RB
4085 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
4086 || ((TYPE_PRECISION (TREE_TYPE (@00))
4087 >= TYPE_PRECISION (TREE_TYPE (@10)))
4088 && (TYPE_UNSIGNED (TREE_TYPE (@00))
4089 == TYPE_UNSIGNED (TREE_TYPE (@10))))
4090 || (TREE_CODE (@10) == INTEGER_CST
f6c15759 4091 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
4092 && int_fits_type_p (@10, TREE_TYPE (@00)))))
4093 (cmp @00 (convert @10))
4094 (if (TREE_CODE (@10) == INTEGER_CST
f6c15759 4095 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
4096 && !int_fits_type_p (@10, TREE_TYPE (@00)))
4097 (with
4098 {
4099 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
4100 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
4101 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
4102 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
4103 }
4104 (if (above || below)
4105 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
4106 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
4107 (if (cmp == LT_EXPR || cmp == LE_EXPR)
4108 { constant_boolean_node (above ? true : false, type); }
4109 (if (cmp == GT_EXPR || cmp == GE_EXPR)
4110 { constant_boolean_node (above ? false : true, type); }))))))))))))
66e1cacf 4111
96a111a3
RB
4112(for cmp (eq ne)
4113 /* A local variable can never be pointed to by
4114 the default SSA name of an incoming parameter.
4115 SSA names are canonicalized to 2nd place. */
4116 (simplify
4117 (cmp addr@0 SSA_NAME@1)
4118 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
4119 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
4120 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
4121 (if (TREE_CODE (base) == VAR_DECL
4122 && auto_var_in_fn_p (base, current_function_decl))
4123 (if (cmp == NE_EXPR)
4124 { constant_boolean_node (true, type); }
4125 { constant_boolean_node (false, type); }))))))
4126
66e1cacf
RB
4127/* Equality compare simplifications from fold_binary */
4128(for cmp (eq ne)
4129
4130 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
4131 Similarly for NE_EXPR. */
4132 (simplify
4133 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
4134 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
8e6cdc90 4135 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
66e1cacf
RB
4136 { constant_boolean_node (cmp == NE_EXPR, type); }))
4137
4138 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
4139 (simplify
4140 (cmp (bit_xor @0 @1) integer_zerop)
4141 (cmp @0 @1))
4142
4143 /* (X ^ Y) == Y becomes X == 0.
4144 Likewise (X ^ Y) == X becomes Y == 0. */
4145 (simplify
99e943a2 4146 (cmp:c (bit_xor:c @0 @1) @0)
66e1cacf
RB
4147 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
4148
4149 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
4150 (simplify
4151 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
4152 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
d057c866 4153 (cmp @0 (bit_xor @1 (convert @2)))))
d057c866
RB
4154
4155 (simplify
4156 (cmp (convert? addr@0) integer_zerop)
4157 (if (tree_single_nonzero_warnv_p (@0, NULL))
4158 { constant_boolean_node (cmp == NE_EXPR, type); })))
4159
b0eb889b
MG
4160/* If we have (A & C) == C where C is a power of 2, convert this into
4161 (A & C) != 0. Similarly for NE_EXPR. */
4162(for cmp (eq ne)
4163 icmp (ne eq)
4164 (simplify
4165 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
4166 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
03cc70b5 4167
519e0faa
PB
4168/* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
4169 convert this into a shift followed by ANDing with D. */
4170(simplify
4171 (cond
4172 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
9e61e48e
JJ
4173 INTEGER_CST@2 integer_zerop)
4174 (if (integer_pow2p (@2))
4175 (with {
4176 int shift = (wi::exact_log2 (wi::to_wide (@2))
4177 - wi::exact_log2 (wi::to_wide (@1)));
4178 }
4179 (if (shift > 0)
4180 (bit_and
4181 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
4182 (bit_and
4183 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
4184 @2)))))
519e0faa 4185
b0eb889b
MG
4186/* If we have (A & C) != 0 where C is the sign bit of A, convert
4187 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
4188(for cmp (eq ne)
4189 ncmp (ge lt)
4190 (simplify
4191 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
4192 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2be65d9e 4193 && type_has_mode_precision_p (TREE_TYPE (@0))
b0eb889b 4194 && element_precision (@2) >= element_precision (@0)
8e6cdc90 4195 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
b0eb889b
MG
4196 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
4197 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
4198
519e0faa 4199/* If we have A < 0 ? C : 0 where C is a power of 2, convert
c0140e3c 4200 this into a right shift or sign extension followed by ANDing with C. */
519e0faa
PB
4201(simplify
4202 (cond
4203 (lt @0 integer_zerop)
9e61e48e
JJ
4204 INTEGER_CST@1 integer_zerop)
4205 (if (integer_pow2p (@1)
4206 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
c0140e3c 4207 (with {
8e6cdc90 4208 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
c0140e3c
JJ
4209 }
4210 (if (shift >= 0)
4211 (bit_and
4212 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
4213 @1)
4214 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
4215 sign extension followed by AND with C will achieve the effect. */
4216 (bit_and (convert @0) @1)))))
519e0faa 4217
68aba1f6
RB
4218/* When the addresses are not directly of decls compare base and offset.
4219 This implements some remaining parts of fold_comparison address
4220 comparisons but still no complete part of it. Still it is good
4221 enough to make fold_stmt not regress when not dispatching to fold_binary. */
4222(for cmp (simple_comparison)
4223 (simplify
f501d5cd 4224 (cmp (convert1?@2 addr@0) (convert2? addr@1))
68aba1f6
RB
4225 (with
4226 {
a90c8804 4227 poly_int64 off0, off1;
68aba1f6
RB
4228 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
4229 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
4230 if (base0 && TREE_CODE (base0) == MEM_REF)
4231 {
aca52e6f 4232 off0 += mem_ref_offset (base0).force_shwi ();
68aba1f6
RB
4233 base0 = TREE_OPERAND (base0, 0);
4234 }
4235 if (base1 && TREE_CODE (base1) == MEM_REF)
4236 {
aca52e6f 4237 off1 += mem_ref_offset (base1).force_shwi ();
68aba1f6
RB
4238 base1 = TREE_OPERAND (base1, 0);
4239 }
4240 }
da571fda
RB
4241 (if (base0 && base1)
4242 (with
4243 {
aad88aed 4244 int equal = 2;
70f40fea
JJ
4245 /* Punt in GENERIC on variables with value expressions;
4246 the value expressions might point to fields/elements
4247 of other vars etc. */
4248 if (GENERIC
4249 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
4250 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
4251 ;
4252 else if (decl_in_symtab_p (base0)
4253 && decl_in_symtab_p (base1))
da571fda
RB
4254 equal = symtab_node::get_create (base0)
4255 ->equal_address_to (symtab_node::get_create (base1));
c3bea076
RB
4256 else if ((DECL_P (base0)
4257 || TREE_CODE (base0) == SSA_NAME
4258 || TREE_CODE (base0) == STRING_CST)
4259 && (DECL_P (base1)
4260 || TREE_CODE (base1) == SSA_NAME
4261 || TREE_CODE (base1) == STRING_CST))
aad88aed 4262 equal = (base0 == base1);
93aa3c4a
JJ
4263 if (equal == 0)
4264 {
a4f9edf3
RB
4265 HOST_WIDE_INT ioff0 = -1, ioff1 = -1;
4266 off0.is_constant (&ioff0);
4267 off1.is_constant (&ioff1);
4268 if ((DECL_P (base0) && TREE_CODE (base1) == STRING_CST)
4269 || (TREE_CODE (base0) == STRING_CST && DECL_P (base1))
4270 || (TREE_CODE (base0) == STRING_CST
4271 && TREE_CODE (base1) == STRING_CST
4272 && ioff0 >= 0 && ioff1 >= 0
4273 && ioff0 < TREE_STRING_LENGTH (base0)
4274 && ioff1 < TREE_STRING_LENGTH (base1)
4275 /* This is a too conservative test that the STRING_CSTs
4276 will not end up being string-merged. */
4277 && strncmp (TREE_STRING_POINTER (base0) + ioff0,
4278 TREE_STRING_POINTER (base1) + ioff1,
4279 MIN (TREE_STRING_LENGTH (base0) - ioff0,
4280 TREE_STRING_LENGTH (base1) - ioff1)) != 0))
4281 ;
4282 else if (!DECL_P (base0) || !DECL_P (base1))
93aa3c4a
JJ
4283 equal = 2;
4284 else if (cmp != EQ_EXPR && cmp != NE_EXPR)
4285 equal = 2;
4286 /* If this is a pointer comparison, ignore for now even
4287 valid equalities where one pointer is the offset zero
4288 of one object and the other to one past end of another one. */
4289 else if (!INTEGRAL_TYPE_P (TREE_TYPE (@2)))
4290 ;
4291 /* Assume that automatic variables can't be adjacent to global
4292 variables. */
4293 else if (is_global_var (base0) != is_global_var (base1))
4294 ;
4295 else
4296 {
4297 tree sz0 = DECL_SIZE_UNIT (base0);
4298 tree sz1 = DECL_SIZE_UNIT (base1);
4299 /* If sizes are unknown, e.g. VLA or not representable,
4300 punt. */
4301 if (!tree_fits_poly_int64_p (sz0)
4302 || !tree_fits_poly_int64_p (sz1))
4303 equal = 2;
4304 else
4305 {
4306 poly_int64 size0 = tree_to_poly_int64 (sz0);
4307 poly_int64 size1 = tree_to_poly_int64 (sz1);
4308 /* If one offset is pointing (or could be) to the beginning
4309 of one object and the other is pointing to one past the
4310 last byte of the other object, punt. */
4311 if (maybe_eq (off0, 0) && maybe_eq (off1, size1))
4312 equal = 2;
4313 else if (maybe_eq (off1, 0) && maybe_eq (off0, size0))
4314 equal = 2;
4315 /* If both offsets are the same, there are some cases
4316 we know that are ok. Either if we know they aren't
4317 zero, or if we know both sizes are no zero. */
4318 if (equal == 2
4319 && known_eq (off0, off1)
4320 && (known_ne (off0, 0)
4321 || (known_ne (size0, 0) && known_ne (size1, 0))))
4322 equal = 0;
4323 }
4324 }
4325 }
da571fda 4326 }
3fccbb9e
JJ
4327 (if (equal == 1
4328 && (cmp == EQ_EXPR || cmp == NE_EXPR
4329 /* If the offsets are equal we can ignore overflow. */
4330 || known_eq (off0, off1)
4331 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
4332 /* Or if we compare using pointers to decls or strings. */
4333 || (POINTER_TYPE_P (TREE_TYPE (@2))
4334 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
da571fda 4335 (switch
a90c8804
RS
4336 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4337 { constant_boolean_node (known_eq (off0, off1), type); })
4338 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4339 { constant_boolean_node (known_ne (off0, off1), type); })
4340 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
4341 { constant_boolean_node (known_lt (off0, off1), type); })
4342 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
4343 { constant_boolean_node (known_le (off0, off1), type); })
4344 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
4345 { constant_boolean_node (known_ge (off0, off1), type); })
4346 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
4347 { constant_boolean_node (known_gt (off0, off1), type); }))
93aa3c4a
JJ
4348 (if (equal == 0)
4349 (switch
4350 (if (cmp == EQ_EXPR)
4351 { constant_boolean_node (false, type); })
4352 (if (cmp == NE_EXPR)
4353 { constant_boolean_node (true, type); })))))))))
66e1cacf 4354
98998245
RB
4355/* Simplify pointer equality compares using PTA. */
/* PTA = points-to analysis.  ptrs_compare_unequal returns true only when
   the two pointers provably refer to distinct objects, so the eq/ne
   comparison folds to a constant boolean of the result type.  */
 4356(for neeq (ne eq)
 4357 (simplify
 4358 (neeq @0 @1)
 4359 (if (POINTER_TYPE_P (TREE_TYPE (@0))
 4360 && ptrs_compare_unequal (@0, @1))
f913ff2a 4361 { constant_boolean_node (neeq != EQ_EXPR, type); })))
98998245 4362
8f63caf6 4363/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
467719fb
PK
4364 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
4365 Disable the transform if either operand is pointer to function.
4366 This broke pr22051-2.c for arm where function pointer
4367 canonicalizaion is not wanted. */
1c0a8806 4368
8f63caf6
RB
4369(for cmp (ne eq)
4370 (simplify
4371 (cmp (convert @0) INTEGER_CST@1)
f53e7e13
JJ
4372 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
4373 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
4374 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4375 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4376 && POINTER_TYPE_P (TREE_TYPE (@1))
4377 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
4378 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
8f63caf6
RB
4379 (cmp @0 (convert @1)))))
4380
21aacde4
RB
4381/* Non-equality compare simplifications from fold_binary */
4382(for cmp (lt gt le ge)
4383 /* Comparisons with the highest or lowest possible integer of
4384 the specified precision will have known values. */
4385 (simplify
f06e47d7
JJ
4386 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
4387 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
4388 || POINTER_TYPE_P (TREE_TYPE (@1))
4389 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
21aacde4
RB
4390 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
4391 (with
4392 {
f06e47d7
JJ
4393 tree cst = uniform_integer_cst_p (@1);
4394 tree arg1_type = TREE_TYPE (cst);
21aacde4
RB
4395 unsigned int prec = TYPE_PRECISION (arg1_type);
4396 wide_int max = wi::max_value (arg1_type);
4397 wide_int signed_max = wi::max_value (prec, SIGNED);
4398 wide_int min = wi::min_value (arg1_type);
4399 }
4400 (switch
f06e47d7 4401 (if (wi::to_wide (cst) == max)
21aacde4
RB
4402 (switch
4403 (if (cmp == GT_EXPR)
4404 { constant_boolean_node (false, type); })
4405 (if (cmp == GE_EXPR)
4406 (eq @2 @1))
4407 (if (cmp == LE_EXPR)
4408 { constant_boolean_node (true, type); })
4409 (if (cmp == LT_EXPR)
4410 (ne @2 @1))))
f06e47d7 4411 (if (wi::to_wide (cst) == min)
21aacde4
RB
4412 (switch
4413 (if (cmp == LT_EXPR)
4414 { constant_boolean_node (false, type); })
4415 (if (cmp == LE_EXPR)
4416 (eq @2 @1))
4417 (if (cmp == GE_EXPR)
4418 { constant_boolean_node (true, type); })
4419 (if (cmp == GT_EXPR)
4420 (ne @2 @1))))
f06e47d7 4421 (if (wi::to_wide (cst) == max - 1)
9bc22d19
RB
4422 (switch
4423 (if (cmp == GT_EXPR)
f06e47d7
JJ
4424 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4425 wide_int_to_tree (TREE_TYPE (cst),
4426 wi::to_wide (cst)
4427 + 1)); }))
9bc22d19 4428 (if (cmp == LE_EXPR)
f06e47d7
JJ
4429 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4430 wide_int_to_tree (TREE_TYPE (cst),
4431 wi::to_wide (cst)
4432 + 1)); }))))
4433 (if (wi::to_wide (cst) == min + 1)
21aacde4
RB
4434 (switch
4435 (if (cmp == GE_EXPR)
f06e47d7
JJ
4436 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4437 wide_int_to_tree (TREE_TYPE (cst),
4438 wi::to_wide (cst)
4439 - 1)); }))
21aacde4 4440 (if (cmp == LT_EXPR)
f06e47d7
JJ
4441 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4442 wide_int_to_tree (TREE_TYPE (cst),
4443 wi::to_wide (cst)
4444 - 1)); }))))
4445 (if (wi::to_wide (cst) == signed_max
21aacde4
RB
4446 && TYPE_UNSIGNED (arg1_type)
4447 /* We will flip the signedness of the comparison operator
4448 associated with the mode of @1, so the sign bit is
4449 specified by this mode. Check that @1 is the signed
4450 max associated with this sign bit. */
7a504f33 4451 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
21aacde4
RB
4452 /* signed_type does not work on pointer types. */
4453 && INTEGRAL_TYPE_P (arg1_type))
4454 /* The following case also applies to X < signed_max+1
4455 and X >= signed_max+1 because previous transformations. */
4456 (if (cmp == LE_EXPR || cmp == GT_EXPR)
f06e47d7
JJ
4457 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
4458 (switch
4459 (if (cst == @1 && cmp == LE_EXPR)
4460 (ge (convert:st @0) { build_zero_cst (st); }))
4461 (if (cst == @1 && cmp == GT_EXPR)
4462 (lt (convert:st @0) { build_zero_cst (st); }))
4463 (if (cmp == LE_EXPR)
4464 (ge (view_convert:st @0) { build_zero_cst (st); }))
4465 (if (cmp == GT_EXPR)
4466 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
03cc70b5 4467
b5d3d787
RB
4468(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
4469 /* If the second operand is NaN, the result is constant. */
4470 (simplify
4471 (cmp @0 REAL_CST@1)
4472 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4473 && (cmp != LTGT_EXPR || ! flag_trapping_math))
50301115 4474 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
b5d3d787 4475 ? false : true, type); })))
21aacde4 4476
55cf3946
RB
4477/* bool_var != 0 becomes bool_var. */
4478(simplify
b5d3d787 4479 (ne @0 integer_zerop)
55cf3946
RB
4480 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4481 && types_match (type, TREE_TYPE (@0)))
4482 (non_lvalue @0)))
4483/* bool_var == 1 becomes bool_var. */
4484(simplify
b5d3d787 4485 (eq @0 integer_onep)
55cf3946
RB
4486 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4487 && types_match (type, TREE_TYPE (@0)))
4488 (non_lvalue @0)))
b5d3d787
RB
4489/* Do not handle
4490 bool_var == 0 becomes !bool_var or
4491 bool_var != 1 becomes !bool_var
4492 here because that only is good in assignment context as long
4493 as we require a tcc_comparison in GIMPLE_CONDs where we'd
4494 replace if (x == 0) with tem = ~x; if (tem != 0) which is
4495 clearly less optimal and which we'll transform again in forwprop. */
55cf3946 4496
ca1206be
MG
4497/* When one argument is a constant, overflow detection can be simplified.
4498 Currently restricted to single use so as not to interfere too much with
4499 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4500 A + CST CMP A -> A CMP' CST' */
4501(for cmp (lt le ge gt)
4502 out (gt gt le le)
4503 (simplify
a8e9f9a3 4504 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
ca1206be
MG
4505 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4506 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
8e6cdc90 4507 && wi::to_wide (@1) != 0
ca1206be 4508 && single_use (@2))
8e6cdc90
RS
4509 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4510 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4511 wi::max_value (prec, UNSIGNED)
4512 - wi::to_wide (@1)); })))))
ca1206be 4513
3563f78f
MG
4514/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4515 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4516 expects the long form, so we restrict the transformation for now. */
4517(for cmp (gt le)
4518 (simplify
a8e9f9a3 4519 (cmp:c (minus@2 @0 @1) @0)
3563f78f
MG
4520 (if (single_use (@2)
4521 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4522 && TYPE_UNSIGNED (TREE_TYPE (@0))
4523 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4524 (cmp @1 @0))))
3563f78f
MG
4525
4526/* Testing for overflow is unnecessary if we already know the result. */
3563f78f
MG
4527/* A - B > A */
4528(for cmp (gt le)
4529 out (ne eq)
4530 (simplify
a8e9f9a3 4531 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3563f78f
MG
4532 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4533 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4534 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4535/* A + B < A */
4536(for cmp (lt ge)
4537 out (ne eq)
4538 (simplify
a8e9f9a3 4539 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3563f78f
MG
4540 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4541 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4542 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4543
603aeb87 4544/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
0557293f 4545 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
0557293f
AM
4546(for cmp (lt ge)
4547 out (ne eq)
4548 (simplify
603aeb87 4549 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
0557293f
AM
4550 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4551 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4552 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
55cf3946 4553
53f3cd25
RS
4554/* Simplification of math builtins. These rules must all be optimizations
4555 as well as IL simplifications. If there is a possibility that the new
4556 form could be a pessimization, the rule should go in the canonicalization
4557 section that follows this one.
e18c1d66 4558
53f3cd25
RS
4559 Rules can generally go in this section if they satisfy one of
4560 the following:
4561
4562 - the rule describes an identity
4563
4564 - the rule replaces calls with something as simple as addition or
4565 multiplication
4566
4567 - the rule contains unary calls only and simplifies the surrounding
4568 arithmetic. (The idea here is to exclude non-unary calls in which
4569 one operand is constant and in which the call is known to be cheap
4570 when the operand has that value.) */
52c6378a 4571
53f3cd25 4572(if (flag_unsafe_math_optimizations)
52c6378a
N
4573 /* Simplify sqrt(x) * sqrt(x) -> x. */
4574 (simplify
c6cfa2bf 4575 (mult (SQRT_ALL@1 @0) @1)
52c6378a
N
4576 (if (!HONOR_SNANS (type))
4577 @0))
4578
ed17cb57
JW
4579 (for op (plus minus)
4580 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
4581 (simplify
4582 (op (rdiv @0 @1)
4583 (rdiv @2 @1))
4584 (rdiv (op @0 @2) @1)))
4585
5e21d765
WD
4586 (for cmp (lt le gt ge)
4587 neg_cmp (gt ge lt le)
4588 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
4589 (simplify
4590 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
4591 (with
4592 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
4593 (if (tem
4594 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
4595 || (real_zerop (tem) && !real_zerop (@1))))
4596 (switch
4597 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
4598 (cmp @0 { tem; }))
4599 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
4600 (neg_cmp @0 { tem; })))))))
4601
35401640
N
 4602 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
 /* The iterator also covers CBRT: cbrt(x) * cbrt(y) -> cbrt(x*y).
    Sits inside the flag_unsafe_math_optimizations guard above, since
    combining the roots can change rounding and overflow behavior.  */
 4603 (for root (SQRT CBRT)
 4604 (simplify
 4605 (mult (root:s @0) (root:s @1))
 4606 (root (mult @0 @1))))
4607
35401640
N
 4608 /* Simplify expN(x) * expN(y) -> expN(x+y). */
 /* Applies uniformly to EXP, EXP2, EXP10 and POW10.  Only valid under
    the enclosing flag_unsafe_math_optimizations guard because the
    combined form rounds differently from the two separate calls.  */
 4609 (for exps (EXP EXP2 EXP10 POW10)
 4610 (simplify
 4611 (mult (exps:s @0) (exps:s @1))
 4612 (exps (plus @0 @1))))
4613
52c6378a 4614 /* Simplify a/root(b/c) into a*root(c/b). */
35401640
N
4615 (for root (SQRT CBRT)
4616 (simplify
4617 (rdiv @0 (root:s (rdiv:s @1 @2)))
4618 (mult @0 (root (rdiv @2 @1)))))
4619
 4620 /* Simplify x/expN(y) into x*expN(-y). */
 /* Replaces a division with a (typically cheaper) multiplication.
    Rounding — and possible underflow of expN(-y) — may differ, which
    is why this lives under the unsafe-math guard above.  */
 4621 (for exps (EXP EXP2 EXP10 POW10)
 4622 (simplify
 4623 (rdiv @0 (exps:s @1))
 4624 (mult @0 (exps (negate @1)))))
52c6378a 4625
eee7b6c4
RB
4626 (for logs (LOG LOG2 LOG10 LOG10)
4627 exps (EXP EXP2 EXP10 POW10)
8acda9b2 4628 /* logN(expN(x)) -> x. */
e18c1d66
RB
4629 (simplify
4630 (logs (exps @0))
8acda9b2
RS
4631 @0)
4632 /* expN(logN(x)) -> x. */
4633 (simplify
4634 (exps (logs @0))
4635 @0))
53f3cd25 4636
e18c1d66
RB
4637 /* Optimize logN(func()) for various exponential functions. We
4638 want to determine the value "x" and the power "exponent" in
4639 order to transform logN(x**exponent) into exponent*logN(x). */
eee7b6c4
RB
4640 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
4641 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
e18c1d66
RB
4642 (simplify
4643 (logs (exps @0))
c9e926ce
RS
4644 (if (SCALAR_FLOAT_TYPE_P (type))
4645 (with {
4646 tree x;
4647 switch (exps)
4648 {
4649 CASE_CFN_EXP:
4650 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
4651 x = build_real_truncate (type, dconst_e ());
4652 break;
4653 CASE_CFN_EXP2:
4654 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
4655 x = build_real (type, dconst2);
4656 break;
4657 CASE_CFN_EXP10:
4658 CASE_CFN_POW10:
4659 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
4660 {
4661 REAL_VALUE_TYPE dconst10;
4662 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4663 x = build_real (type, dconst10);
4664 }
4665 break;
4666 default:
4667 gcc_unreachable ();
4668 }
4669 }
4670 (mult (logs { x; }) @0)))))
53f3cd25 4671
e18c1d66
RB
4672 (for logs (LOG LOG
4673 LOG2 LOG2
4674 LOG10 LOG10)
4675 exps (SQRT CBRT)
4676 (simplify
4677 (logs (exps @0))
c9e926ce
RS
4678 (if (SCALAR_FLOAT_TYPE_P (type))
4679 (with {
4680 tree x;
4681 switch (exps)
4682 {
4683 CASE_CFN_SQRT:
4684 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
4685 x = build_real (type, dconsthalf);
4686 break;
4687 CASE_CFN_CBRT:
4688 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
4689 x = build_real_truncate (type, dconst_third ());
4690 break;
4691 default:
4692 gcc_unreachable ();
4693 }
4694 }
4695 (mult { x; } (logs @0))))))
53f3cd25
RS
4696
4697 /* logN(pow(x,exponent)) -> exponent*logN(x). */
e18c1d66
RB
4698 (for logs (LOG LOG2 LOG10)
4699 pows (POW)
4700 (simplify
4701 (logs (pows @0 @1))
53f3cd25
RS
4702 (mult @1 (logs @0))))
4703
848bb6fc
JJ
4704 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4705 or if C is a positive power of 2,
4706 pow(C,x) -> exp2(log2(C)*x). */
30a2c10e 4707#if GIMPLE
e83fe013
WD
4708 (for pows (POW)
4709 exps (EXP)
4710 logs (LOG)
848bb6fc
JJ
4711 exp2s (EXP2)
4712 log2s (LOG2)
e83fe013
WD
4713 (simplify
4714 (pows REAL_CST@0 @1)
848bb6fc 4715 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
ef7866a3
JJ
4716 && real_isfinite (TREE_REAL_CST_PTR (@0))
4717 /* As libmvec doesn't have a vectorized exp2, defer optimizing
4718 the use_exp2 case until after vectorization. It seems actually
4719 beneficial for all constants to postpone this until later,
4720 because exp(log(C)*x), while faster, will have worse precision
4721 and if x folds into a constant too, that is unnecessary
4722 pessimization. */
4723 && canonicalize_math_after_vectorization_p ())
848bb6fc
JJ
4724 (with {
4725 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4726 bool use_exp2 = false;
4727 if (targetm.libc_has_function (function_c99_misc)
4728 && value->cl == rvc_normal)
4729 {
4730 REAL_VALUE_TYPE frac_rvt = *value;
4731 SET_REAL_EXP (&frac_rvt, 1);
4732 if (real_equal (&frac_rvt, &dconst1))
4733 use_exp2 = true;
4734 }
4735 }
4736 (if (!use_exp2)
30a2c10e
JJ
4737 (if (optimize_pow_to_exp (@0, @1))
4738 (exps (mult (logs @0) @1)))
ef7866a3 4739 (exp2s (mult (log2s @0) @1)))))))
30a2c10e 4740#endif
e83fe013 4741
16ef0a8c
JJ
4742 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
4743 (for pows (POW)
4744 exps (EXP EXP2 EXP10 POW10)
4745 logs (LOG LOG2 LOG10 LOG10)
4746 (simplify
4747 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4748 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4749 && real_isfinite (TREE_REAL_CST_PTR (@0)))
4750 (exps (plus (mult (logs @0) @1) @2)))))
4751
53f3cd25
RS
4752 (for sqrts (SQRT)
4753 cbrts (CBRT)
b4838d77 4754 pows (POW)
53f3cd25
RS
4755 exps (EXP EXP2 EXP10 POW10)
4756 /* sqrt(expN(x)) -> expN(x*0.5). */
4757 (simplify
4758 (sqrts (exps @0))
4759 (exps (mult @0 { build_real (type, dconsthalf); })))
4760 /* cbrt(expN(x)) -> expN(x/3). */
4761 (simplify
4762 (cbrts (exps @0))
b4838d77
RS
4763 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4764 /* pow(expN(x), y) -> expN(x*y). */
4765 (simplify
4766 (pows (exps @0) @1)
4767 (exps (mult @0 @1))))
cfed37a0
RS
4768
4769 /* tan(atan(x)) -> x. */
4770 (for tans (TAN)
4771 atans (ATAN)
4772 (simplify
4773 (tans (atans @0))
4774 @0)))
53f3cd25 4775
121ef08b
GB
4776 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
4777 (for sins (SIN)
4778 atans (ATAN)
4779 sqrts (SQRT)
4780 copysigns (COPYSIGN)
4781 (simplify
4782 (sins (atans:s @0))
4783 (with
4784 {
4785 REAL_VALUE_TYPE r_cst;
4786 build_sinatan_real (&r_cst, type);
4787 tree t_cst = build_real (type, r_cst);
4788 tree t_one = build_one_cst (type);
4789 }
4790 (if (SCALAR_FLOAT_TYPE_P (type))
5f054b17 4791 (cond (lt (abs @0) { t_cst; })
121ef08b
GB
4792 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
4793 (copysigns { t_one; } @0))))))
4794
4795/* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
4796 (for coss (COS)
4797 atans (ATAN)
4798 sqrts (SQRT)
4799 copysigns (COPYSIGN)
4800 (simplify
4801 (coss (atans:s @0))
4802 (with
4803 {
4804 REAL_VALUE_TYPE r_cst;
4805 build_sinatan_real (&r_cst, type);
4806 tree t_cst = build_real (type, r_cst);
4807 tree t_one = build_one_cst (type);
4808 tree t_zero = build_zero_cst (type);
4809 }
4810 (if (SCALAR_FLOAT_TYPE_P (type))
5f054b17 4811 (cond (lt (abs @0) { t_cst; })
121ef08b
GB
4812 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
4813 (copysigns { t_zero; } @0))))))
4814
4aff6d17
GB
4815 (if (!flag_errno_math)
4816 /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
4817 (for sinhs (SINH)
4818 atanhs (ATANH)
4819 sqrts (SQRT)
4820 (simplify
4821 (sinhs (atanhs:s @0))
4822 (with { tree t_one = build_one_cst (type); }
4823 (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
4824
4825 /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
4826 (for coshs (COSH)
4827 atanhs (ATANH)
4828 sqrts (SQRT)
4829 (simplify
4830 (coshs (atanhs:s @0))
4831 (with { tree t_one = build_one_cst (type); }
4832 (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
4833
abcc43f5
RS
4834/* cabs(x+0i) or cabs(0+xi) -> abs(x). */
 4835(simplify
/* The :c on COMPLEX_EXPR makes the match commutative, so a zero real
   part or a zero imaginary part both bind @1.  */
 e04d2a35 4836 (CABS (complex:C @0 real_zerop@1))
abcc43f5
RS
 4837 (abs @0))
4838
67dbe582 4839/* trunc(trunc(x)) -> trunc(x), etc. */
/* Each of these rounding functions is idempotent: applying it twice
   yields the same value as applying it once, so the inner call can
   be dropped.  */
c6cfa2bf 4840(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
67dbe582
RS
 4841 (simplify
 4842 (fns (fns @0))
 4843 (fns @0)))
4844/* f(x) -> x if x is integer valued and f does nothing for such values. */
c6cfa2bf 4845(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
67dbe582
RS
/* integer_valued_real_p proves @0 already holds an integral real
   value, on which every listed rounding function is the identity.  */
 4846 (simplify
 4847 (fns integer_valued_real_p@0)
 4848 @0))
67dbe582 4849
4d7836c4
RS
4850/* hypot(x,0) and hypot(0,x) -> abs(x). */
 4851(simplify
/* :c makes the match commutative, covering both argument orders.  */
c9e926ce 4852 (HYPOT:c @0 real_zerop@1)
4d7836c4
RS
 4853 (abs @0))
4854
b4838d77
RS
4855/* pow(1,x) -> 1. */
/* Per C99 Annex F (F.9.4.4), pow(+1, y) is 1 for every y, including
   NaN and infinities, so this fold needs no guard.  */
 4856(simplify
 4857 (POW real_onep@0 @1)
 4858 @0)
4859
461e4145
RS
4860(simplify
4861 /* copysign(x,x) -> x. */
c6cfa2bf 4862 (COPYSIGN_ALL @0 @0)
461e4145
RS
4863 @0)
4864
4865(simplify
4866 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
c6cfa2bf 4867 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
461e4145
RS
4868 (abs @0))
4869
86c0733f
RS
/* Identity folds shared by ldexp, scalbn and scalbln: scaling a zero,
   scaling by 2^0, and scaling a non-finite constant all return the
   first argument unchanged (the sign of zero is preserved because the
   matched operand @0 itself is the result).  */
4870(for scale (LDEXP SCALBN SCALBLN)
 4871 /* ldexp(0, x) -> 0. */
 4872 (simplify
 4873 (scale real_zerop@0 @1)
 4874 @0)
 4875 /* ldexp(x, 0) -> x. */
 4876 (simplify
 4877 (scale @0 integer_zerop@1)
 4878 @0)
 4879 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
 4880 (simplify
 4881 (scale REAL_CST@0 @1)
 4882 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
 4883 @0)))
4884
53f3cd25
RS
4885/* Canonicalization of sequences of math builtins. These rules represent
4886 IL simplifications but are not necessarily optimizations.
4887
4888 The sincos pass is responsible for picking "optimal" implementations
4889 of math builtins, which may be more complicated and can sometimes go
4890 the other way, e.g. converting pow into a sequence of sqrts.
4891 We only want to do these canonicalizations before the pass has run. */
4892
4893(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4894 /* Simplify tan(x) * cos(x) -> sin(x). */
4895 (simplify
4896 (mult:c (TAN:s @0) (COS:s @0))
4897 (SIN @0))
4898
4899 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4900 (simplify
de3fbea3 4901 (mult:c @0 (POW:s @0 REAL_CST@1))
53f3cd25
RS
4902 (if (!TREE_OVERFLOW (@1))
4903 (POW @0 (plus @1 { build_one_cst (type); }))))
4904
4905 /* Simplify sin(x) / cos(x) -> tan(x). */
4906 (simplify
4907 (rdiv (SIN:s @0) (COS:s @0))
4908 (TAN @0))
4909
4910 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4911 (simplify
4912 (rdiv (COS:s @0) (SIN:s @0))
4913 (rdiv { build_one_cst (type); } (TAN @0)))
4914
4915 /* Simplify sin(x) / tan(x) -> cos(x). */
4916 (simplify
4917 (rdiv (SIN:s @0) (TAN:s @0))
4918 (if (! HONOR_NANS (@0)
4919 && ! HONOR_INFINITIES (@0))
c9e926ce 4920 (COS @0)))
53f3cd25
RS
4921
4922 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4923 (simplify
4924 (rdiv (TAN:s @0) (SIN:s @0))
4925 (if (! HONOR_NANS (@0)
4926 && ! HONOR_INFINITIES (@0))
4927 (rdiv { build_one_cst (type); } (COS @0))))
4928
4929 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4930 (simplify
4931 (mult (POW:s @0 @1) (POW:s @0 @2))
4932 (POW @0 (plus @1 @2)))
4933
4934 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4935 (simplify
4936 (mult (POW:s @0 @1) (POW:s @2 @1))
4937 (POW (mult @0 @2) @1))
4938
de3fbea3
RB
4939 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4940 (simplify
4941 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4942 (POWI (mult @0 @2) @1))
4943
53f3cd25
RS
4944 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4945 (simplify
4946 (rdiv (POW:s @0 REAL_CST@1) @0)
4947 (if (!TREE_OVERFLOW (@1))
4948 (POW @0 (minus @1 { build_one_cst (type); }))))
4949
4950 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4951 (simplify
4952 (rdiv @0 (POW:s @1 @2))
4953 (mult @0 (POW @1 (negate @2))))
4954
4955 (for sqrts (SQRT)
4956 cbrts (CBRT)
4957 pows (POW)
4958 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4959 (simplify
4960 (sqrts (sqrts @0))
4961 (pows @0 { build_real (type, dconst_quarter ()); }))
4962 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4963 (simplify
4964 (sqrts (cbrts @0))
4965 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4966 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4967 (simplify
4968 (cbrts (sqrts @0))
4969 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4970 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4971 (simplify
4972 (cbrts (cbrts tree_expr_nonnegative_p@0))
4973 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4974 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4975 (simplify
4976 (sqrts (pows @0 @1))
4977 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4978 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4979 (simplify
4980 (cbrts (pows tree_expr_nonnegative_p@0 @1))
b4838d77
RS
4981 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4982 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4983 (simplify
4984 (pows (sqrts @0) @1)
4985 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4986 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4987 (simplify
4988 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4989 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4990 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4991 (simplify
4992 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4993 (pows @0 (mult @1 @2))))
abcc43f5
RS
4994
4995 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4996 (simplify
4997 (CABS (complex @0 @0))
96285749
RS
4998 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4999
4d7836c4
RS
5000 /* hypot(x,x) -> fabs(x)*sqrt(2). */
5001 (simplify
5002 (HYPOT @0 @0)
5003 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
5004
96285749
RS
5005 /* cexp(x+yi) -> exp(x)*cexpi(y). */
5006 (for cexps (CEXP)
5007 exps (EXP)
5008 cexpis (CEXPI)
5009 (simplify
5010 (cexps compositional_complex@0)
5011 (if (targetm.libc_has_function (function_c99_math_complex))
5012 (complex
5013 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
5014 (mult @1 (imagpart @2)))))))
e18c1d66 5015
67dbe582
RS
5016(if (canonicalize_math_p ())
5017 /* floor(x) -> trunc(x) if x is nonnegative. */
c6cfa2bf
MM
5018 (for floors (FLOOR_ALL)
5019 truncs (TRUNC_ALL)
67dbe582
RS
5020 (simplify
5021 (floors tree_expr_nonnegative_p@0)
5022 (truncs @0))))
5023
/* Predicate: true when @0's type (main variant) is exactly double.
   Used by the narrowing patterns below to rewrite long-double math
   calls on widened doubles into their double counterparts.  */
5024(match double_value_p
 5025 @0
 5026 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
5027(for froms (BUILT_IN_TRUNCL
5028 BUILT_IN_FLOORL
5029 BUILT_IN_CEILL
5030 BUILT_IN_ROUNDL
5031 BUILT_IN_NEARBYINTL
5032 BUILT_IN_RINTL)
5033 tos (BUILT_IN_TRUNC
5034 BUILT_IN_FLOOR
5035 BUILT_IN_CEIL
5036 BUILT_IN_ROUND
5037 BUILT_IN_NEARBYINT
5038 BUILT_IN_RINT)
5039 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
5040 (if (optimize && canonicalize_math_p ())
5041 (simplify
5042 (froms (convert double_value_p@0))
5043 (convert (tos @0)))))
5044
/* Predicate: true when @0's type (main variant) is exactly float.
   Used by the narrowing patterns below to rewrite double/long-double
   math calls on widened floats into their float counterparts.  */
5045(match float_value_p
 5046 @0
 5047 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
5048(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
5049 BUILT_IN_FLOORL BUILT_IN_FLOOR
5050 BUILT_IN_CEILL BUILT_IN_CEIL
5051 BUILT_IN_ROUNDL BUILT_IN_ROUND
5052 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
5053 BUILT_IN_RINTL BUILT_IN_RINT)
5054 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
5055 BUILT_IN_FLOORF BUILT_IN_FLOORF
5056 BUILT_IN_CEILF BUILT_IN_CEILF
5057 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
5058 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
5059 BUILT_IN_RINTF BUILT_IN_RINTF)
5060 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
5061 if x is a float. */
5dac7dbd
JDA
5062 (if (optimize && canonicalize_math_p ()
5063 && targetm.libc_has_function (function_c99_misc))
67dbe582
RS
5064 (simplify
5065 (froms (convert float_value_p@0))
5066 (convert (tos @0)))))
5067
543a9bcd
RS
5068(for froms (XFLOORL XCEILL XROUNDL XRINTL)
5069 tos (XFLOOR XCEIL XROUND XRINT)
5070 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
5071 (if (optimize && canonicalize_math_p ())
5072 (simplify
5073 (froms (convert double_value_p@0))
5074 (tos @0))))
5075
5076(for froms (XFLOORL XCEILL XROUNDL XRINTL
5077 XFLOOR XCEIL XROUND XRINT)
5078 tos (XFLOORF XCEILF XROUNDF XRINTF)
5079 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
5080 if x is a float. */
5081 (if (optimize && canonicalize_math_p ())
5082 (simplify
5083 (froms (convert float_value_p@0))
5084 (tos @0))))
5085
5086(if (canonicalize_math_p ())
5087 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
5088 (for floors (IFLOOR LFLOOR LLFLOOR)
5089 (simplify
5090 (floors tree_expr_nonnegative_p@0)
5091 (fix_trunc @0))))
5092
5093(if (canonicalize_math_p ())
5094 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
5095 (for fns (IFLOOR LFLOOR LLFLOOR
5096 ICEIL LCEIL LLCEIL
5097 IROUND LROUND LLROUND)
5098 (simplify
5099 (fns integer_valued_real_p@0)
5100 (fix_trunc @0)))
5101 (if (!flag_errno_math)
5102 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
5103 (for rints (IRINT LRINT LLRINT)
5104 (simplify
5105 (rints integer_valued_real_p@0)
5106 (fix_trunc @0)))))
5107
5108(if (canonicalize_math_p ())
5109 (for ifn (IFLOOR ICEIL IROUND IRINT)
5110 lfn (LFLOOR LCEIL LROUND LRINT)
5111 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
5112 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
5113 sizeof (int) == sizeof (long). */
5114 (if (TYPE_PRECISION (integer_type_node)
5115 == TYPE_PRECISION (long_integer_type_node))
5116 (simplify
5117 (ifn @0)
5118 (lfn:long_integer_type_node @0)))
5119 /* Canonicalize llround (x) to lround (x) on LP64 targets where
5120 sizeof (long long) == sizeof (long). */
5121 (if (TYPE_PRECISION (long_long_integer_type_node)
5122 == TYPE_PRECISION (long_integer_type_node))
5123 (simplify
5124 (llfn @0)
5125 (lfn:long_integer_type_node @0)))))
5126
92c52eab
RS
5127/* cproj(x) -> x if we're ignoring infinities. */
5128(simplify
5129 (CPROJ @0)
5130 (if (!HONOR_INFINITIES (type))
5131 @0))
5132
4534c203
RB
5133/* If the real part is inf and the imag part is known to be
5134 nonnegative, return (inf + 0i). */
5135(simplify
5136 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
5137 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
92c52eab
RS
5138 { build_complex_inf (type, false); }))
5139
4534c203
RB
5140/* If the imag part is inf, return (inf+I*copysign(0,imag)). */
5141(simplify
5142 (CPROJ (complex @0 REAL_CST@1))
5143 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
92c52eab 5144 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4534c203 5145
b4838d77
RS
5146(for pows (POW)
5147 sqrts (SQRT)
5148 cbrts (CBRT)
5149 (simplify
5150 (pows @0 REAL_CST@1)
5151 (with {
5152 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
5153 REAL_VALUE_TYPE tmp;
5154 }
5155 (switch
5156 /* pow(x,0) -> 1. */
5157 (if (real_equal (value, &dconst0))
5158 { build_real (type, dconst1); })
5159 /* pow(x,1) -> x. */
5160 (if (real_equal (value, &dconst1))
5161 @0)
5162 /* pow(x,-1) -> 1/x. */
5163 (if (real_equal (value, &dconstm1))
5164 (rdiv { build_real (type, dconst1); } @0))
5165 /* pow(x,0.5) -> sqrt(x). */
5166 (if (flag_unsafe_math_optimizations
5167 && canonicalize_math_p ()
5168 && real_equal (value, &dconsthalf))
5169 (sqrts @0))
5170 /* pow(x,1/3) -> cbrt(x). */
5171 (if (flag_unsafe_math_optimizations
5172 && canonicalize_math_p ()
5173 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
5174 real_equal (value, &tmp)))
5175 (cbrts @0))))))
4534c203 5176
5ddc84ca
RS
5177/* powi(1,x) -> 1. */
5178(simplify
5179 (POWI real_onep@0 @1)
5180 @0)
5181
5182(simplify
5183 (POWI @0 INTEGER_CST@1)
5184 (switch
5185 /* powi(x,0) -> 1. */
8e6cdc90 5186 (if (wi::to_wide (@1) == 0)
5ddc84ca
RS
5187 { build_real (type, dconst1); })
5188 /* powi(x,1) -> x. */
8e6cdc90 5189 (if (wi::to_wide (@1) == 1)
5ddc84ca
RS
5190 @0)
5191 /* powi(x,-1) -> 1/x. */
8e6cdc90 5192 (if (wi::to_wide (@1) == -1)
5ddc84ca
RS
5193 (rdiv { build_real (type, dconst1); } @0))))
5194
03cc70b5 5195/* Narrowing of arithmetic and logical operations.
be144838
JL
5196
5197 These are conceptually similar to the transformations performed for
5198 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
5199 term we want to move all that code out of the front-ends into here. */
5200
8f5331b2
TC
5201/* Convert (outertype)((innertype0)a+(innertype1)b)
5202 into ((newtype)a+(newtype)b) where newtype
5203 is the widest mode from all of these. */
5204(for op (plus minus mult rdiv)
5205 (simplify
5206 (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2)))
5207 /* If we have a narrowing conversion of an arithmetic operation where
5208 both operands are widening conversions from the same type as the outer
5209 narrowing conversion. Then convert the innermost operands to a
5210 suitable unsigned type (to avoid introducing undefined behavior),
5211 perform the operation and convert the result to the desired type. */
5212 (if (INTEGRAL_TYPE_P (type)
5213 && op != MULT_EXPR
5214 && op != RDIV_EXPR
5215 /* We check for type compatibility between @0 and @1 below,
5216 so there's no need to check that @2/@4 are integral types. */
5217 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
5218 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
5219 /* The precision of the type of each operand must match the
5220 precision of the mode of each operand, similarly for the
5221 result. */
5222 && type_has_mode_precision_p (TREE_TYPE (@1))
5223 && type_has_mode_precision_p (TREE_TYPE (@2))
5224 && type_has_mode_precision_p (type)
5225 /* The inner conversion must be a widening conversion. */
5226 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1))
5227 && types_match (@1, type)
5228 && (types_match (@1, @2)
5229 /* Or the second operand is const integer or converted const
5230 integer from valueize. */
5231 || TREE_CODE (@2) == INTEGER_CST))
5232 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
5233 (op @1 (convert @2))
5234 (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); }
5235 (convert (op (convert:utype @1)
5236 (convert:utype @2)))))
5237 (if (FLOAT_TYPE_P (type)
5238 && DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
5239 == DECIMAL_FLOAT_TYPE_P (type))
5240 (with { tree arg0 = strip_float_extensions (@1);
5241 tree arg1 = strip_float_extensions (@2);
5242 tree itype = TREE_TYPE (@0);
5243 tree ty1 = TREE_TYPE (arg0);
5244 tree ty2 = TREE_TYPE (arg1);
5245 enum tree_code code = TREE_CODE (itype); }
5246 (if (FLOAT_TYPE_P (ty1)
5247 && FLOAT_TYPE_P (ty2))
5248 (with { tree newtype = type;
5249 if (TYPE_MODE (ty1) == SDmode
5250 || TYPE_MODE (ty2) == SDmode
5251 || TYPE_MODE (type) == SDmode)
5252 newtype = dfloat32_type_node;
5253 if (TYPE_MODE (ty1) == DDmode
5254 || TYPE_MODE (ty2) == DDmode
5255 || TYPE_MODE (type) == DDmode)
5256 newtype = dfloat64_type_node;
5257 if (TYPE_MODE (ty1) == TDmode
5258 || TYPE_MODE (ty2) == TDmode
5259 || TYPE_MODE (type) == TDmode)
5260 newtype = dfloat128_type_node; }
5261 (if ((newtype == dfloat32_type_node
5262 || newtype == dfloat64_type_node
5263 || newtype == dfloat128_type_node)
5264 && newtype == type
5265 && types_match (newtype, type))
5266 (op (convert:newtype @1) (convert:newtype @2))
dc5b1191 5267 (with { if (TYPE_PRECISION (ty1) > TYPE_PRECISION (newtype))
8f5331b2
TC
5268 newtype = ty1;
5269 if (TYPE_PRECISION (ty2) > TYPE_PRECISION (newtype))
dc5b1191 5270 newtype = ty2; }
8f5331b2
TC
5271 /* Sometimes this transformation is safe (cannot
5272 change results through affecting double rounding
5273 cases) and sometimes it is not. If NEWTYPE is
5274 wider than TYPE, e.g. (float)((long double)double
5275 + (long double)double) converted to
5276 (float)(double + double), the transformation is
5277 unsafe regardless of the details of the types
5278 involved; double rounding can arise if the result
5279 of NEWTYPE arithmetic is a NEWTYPE value half way
5280 between two representable TYPE values but the
5281 exact value is sufficiently different (in the
5282 right direction) for this difference to be
5283 visible in ITYPE arithmetic. If NEWTYPE is the
5284 same as TYPE, however, the transformation may be
5285 safe depending on the types involved: it is safe
5286 if the ITYPE has strictly more than twice as many
5287 mantissa bits as TYPE, can represent infinities
5288 and NaNs if the TYPE can, and has sufficient
5289 exponent range for the product or ratio of two
5290 values representable in the TYPE to be within the
5291 range of normal values of ITYPE. */
5292 (if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
5293 && (flag_unsafe_math_optimizations
5294 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
5295 && real_can_shorten_arithmetic (TYPE_MODE (itype),
5296 TYPE_MODE (type))
5297 && !excess_precision_type (newtype)))
5298 && !types_match (itype, newtype))
5299 (convert:type (op (convert:newtype @1)
5300 (convert:newtype @2)))
5301 )))) )
5302 ))
5303)))
48451e8f
JL
5304
5305/* This is another case of narrowing, specifically when there's an outer
5306 BIT_AND_EXPR which masks off bits outside the type of the innermost
5307 operands. Like the previous case we have to convert the operands
9c582551 5308 to unsigned types to avoid introducing undefined behavior for the
48451e8f
JL
5309 arithmetic operation. */
5310(for op (minus plus)
8fdc6c67
RB
5311 (simplify
5312 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
5313 (if (INTEGRAL_TYPE_P (type)
5314 /* We check for type compatibility between @0 and @1 below,
5315 so there's no need to check that @1/@3 are integral types. */
5316 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
5317 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
5318 /* The precision of the type of each operand must match the
5319 precision of the mode of each operand, similarly for the
5320 result. */
2be65d9e
RS
5321 && type_has_mode_precision_p (TREE_TYPE (@0))
5322 && type_has_mode_precision_p (TREE_TYPE (@1))
5323 && type_has_mode_precision_p (type)
8fdc6c67
RB
5324 /* The inner conversion must be a widening conversion. */
5325 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
5326 && types_match (@0, @1)
5327 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
5328 <= TYPE_PRECISION (TREE_TYPE (@0)))
8e6cdc90
RS
5329 && (wi::to_wide (@4)
5330 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
5331 true, TYPE_PRECISION (type))) == 0)
8fdc6c67
RB
5332 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
5333 (with { tree ntype = TREE_TYPE (@0); }
5334 (convert (bit_and (op @0 @1) (convert:ntype @4))))
5335 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
5336 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
5337 (convert:utype @4))))))))
4f7a5692 5338
03cc70b5 5339/* Transform (@0 < @1 and @0 < @2) to use min,
4f7a5692 5340 (@0 > @1 and @0 > @2) to use max */
dac920e8
MG
5341(for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
5342 op (lt le gt ge lt le gt ge )
5343 ext (min min max max max max min min )
4f7a5692 5344 (simplify
dac920e8 5345 (logic (op:cs @0 @1) (op:cs @0 @2))
4618c453
RB
5346 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5347 && TREE_CODE (@0) != INTEGER_CST)
4f7a5692
MC
5348 (op @0 (ext @1 @2)))))
5349
7317ef4a
RS
5350(simplify
5351 /* signbit(x) -> 0 if x is nonnegative. */
5352 (SIGNBIT tree_expr_nonnegative_p@0)
5353 { integer_zero_node; })
5354
5355(simplify
5356 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
5357 (SIGNBIT @0)
5358 (if (!HONOR_SIGNED_ZEROS (@0))
5359 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
a8b85ce9
MG
5360
5361/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
5362(for cmp (eq ne)
5363 (for op (plus minus)
5364 rop (minus plus)
5365 (simplify
5366 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5367 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5368 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
5369 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
5370 && !TYPE_SATURATING (TREE_TYPE (@0)))
5371 (with { tree res = int_const_binop (rop, @2, @1); }
75473a91
RB
5372 (if (TREE_OVERFLOW (res)
5373 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
a8b85ce9
MG
5374 { constant_boolean_node (cmp == NE_EXPR, type); }
5375 (if (single_use (@3))
11c1e63c
JJ
5376 (cmp @0 { TREE_OVERFLOW (res)
5377 ? drop_tree_overflow (res) : res; }))))))))
a8b85ce9
MG
5378(for cmp (lt le gt ge)
5379 (for op (plus minus)
5380 rop (minus plus)
5381 (simplify
5382 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5383 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5384 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5385 (with { tree res = int_const_binop (rop, @2, @1); }
5386 (if (TREE_OVERFLOW (res))
5387 {
5388 fold_overflow_warning (("assuming signed overflow does not occur "
5389 "when simplifying conditional to constant"),
5390 WARN_STRICT_OVERFLOW_CONDITIONAL);
5391 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
5392 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
8e6cdc90
RS
5393 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
5394 TYPE_SIGN (TREE_TYPE (@1)))
a8b85ce9
MG
5395 != (op == MINUS_EXPR);
5396 constant_boolean_node (less == ovf_high, type);
5397 }
5398 (if (single_use (@3))
5399 (with
5400 {
5401 fold_overflow_warning (("assuming signed overflow does not occur "
5402 "when changing X +- C1 cmp C2 to "
5403 "X cmp C2 -+ C1"),
5404 WARN_STRICT_OVERFLOW_COMPARISON);
5405 }
5406 (cmp @0 { res; })))))))))
d3e40b76
RB
5407
5408/* Canonicalizations of BIT_FIELD_REFs. */
5409
6ec96dcb
RB
5410(simplify
5411 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
5412 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
5413
5414(simplify
5415 (BIT_FIELD_REF (view_convert @0) @1 @2)
5416 (BIT_FIELD_REF @0 @1 @2))
5417
5418(simplify
5419 (BIT_FIELD_REF @0 @1 integer_zerop)
5420 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
5421 (view_convert @0)))
5422
d3e40b76
RB
5423(simplify
5424 (BIT_FIELD_REF @0 @1 @2)
5425 (switch
5426 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
5427 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5428 (switch
5429 (if (integer_zerop (@2))
5430 (view_convert (realpart @0)))
5431 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5432 (view_convert (imagpart @0)))))
5433 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5434 && INTEGRAL_TYPE_P (type)
171f6f05
RB
5435 /* On GIMPLE this should only apply to register arguments. */
5436 && (! GIMPLE || is_gimple_reg (@0))
d3e40b76
RB
5437 /* A bit-field-ref that referenced the full argument can be stripped. */
5438 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
5439 && integer_zerop (@2))
5440 /* Low-parts can be reduced to integral conversions.
5441 ??? The following doesn't work for PDP endian. */
5442 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
5443 /* Don't even think about BITS_BIG_ENDIAN. */
5444 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
5445 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
5446 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
5447 ? (TYPE_PRECISION (TREE_TYPE (@0))
5448 - TYPE_PRECISION (type))
5449 : 0)) == 0)))
5450 (convert @0))))
5451
5452/* Simplify vector extracts. */
5453
5454(simplify
5455 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
5456 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
5457 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
5458 || (VECTOR_TYPE_P (type)
5459 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
5460 (with
5461 {
5462 tree ctor = (TREE_CODE (@0) == SSA_NAME
5463 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
5464 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
5465 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
5466 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
5467 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
5468 }
5469 (if (n != 0
5470 && (idx % width) == 0
5471 && (n % width) == 0
928686b1
RS
5472 && known_le ((idx + n) / width,
5473 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
d3e40b76
RB
5474 (with
5475 {
5476 idx = idx / width;
5477 n = n / width;
5478 /* Constructor elements can be subvectors. */
d34457c1 5479 poly_uint64 k = 1;
d3e40b76
RB
5480 if (CONSTRUCTOR_NELTS (ctor) != 0)
5481 {
5482 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
5483 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
5484 k = TYPE_VECTOR_SUBPARTS (cons_elem);
5485 }
d34457c1 5486 unsigned HOST_WIDE_INT elt, count, const_k;
d3e40b76
RB
5487 }
5488 (switch
5489 /* We keep an exact subset of the constructor elements. */
d34457c1 5490 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
d3e40b76
RB
5491 (if (CONSTRUCTOR_NELTS (ctor) == 0)
5492 { build_constructor (type, NULL); }
d34457c1
RS
5493 (if (count == 1)
5494 (if (elt < CONSTRUCTOR_NELTS (ctor))
4c1da8ea 5495 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
d34457c1 5496 { build_zero_cst (type); })
d3e40b76 5497 {
d34457c1
RS
5498 vec<constructor_elt, va_gc> *vals;
5499 vec_alloc (vals, count);
5500 for (unsigned i = 0;
5501 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
5502 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
5503 CONSTRUCTOR_ELT (ctor, elt + i)->value);
5504 build_constructor (type, vals);
5505 })))
d3e40b76 5506 /* The bitfield references a single constructor element. */
d34457c1
RS
5507 (if (k.is_constant (&const_k)
5508 && idx + n <= (idx / const_k + 1) * const_k)
d3e40b76 5509 (switch
d34457c1 5510 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
d3e40b76 5511 { build_zero_cst (type); })
d34457c1 5512 (if (n == const_k)
4c1da8ea 5513 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
d34457c1
RS
5514 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
5515 @1 { bitsize_int ((idx % const_k) * width); })))))))))
92e29a5e
RB
5516
5517/* Simplify a bit extraction from a bit insertion for the cases with
5518 the inserted element fully covering the extraction or the insertion
5519 not touching the extraction. */
5520(simplify
5521 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
5522 (with
5523 {
5524 unsigned HOST_WIDE_INT isize;
5525 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
5526 isize = TYPE_PRECISION (TREE_TYPE (@1));
5527 else
5528 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
5529 }
5530 (switch
8e6cdc90
RS
5531 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
5532 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
5533 wi::to_wide (@ipos) + isize))
92e29a5e 5534 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
8e6cdc90
RS
5535 wi::to_wide (@rpos)
5536 - wi::to_wide (@ipos)); }))
5537 (if (wi::geu_p (wi::to_wide (@ipos),
5538 wi::to_wide (@rpos) + wi::to_wide (@rsize))
5539 || wi::geu_p (wi::to_wide (@rpos),
5540 wi::to_wide (@ipos) + isize))
92e29a5e 5541 (BIT_FIELD_REF @0 @rsize @rpos)))))
c566cc9f 5542
c453ccc2
RS
5543(if (canonicalize_math_after_vectorization_p ())
5544 (for fmas (FMA)
5545 (simplify
5546 (fmas:c (negate @0) @1 @2)
5547 (IFN_FNMA @0 @1 @2))
5548 (simplify
5549 (fmas @0 @1 (negate @2))
5550 (IFN_FMS @0 @1 @2))
5551 (simplify
5552 (fmas:c (negate @0) @1 (negate @2))
5553 (IFN_FNMS @0 @1 @2))
5554 (simplify
5555 (negate (fmas@3 @0 @1 @2))
5556 (if (single_use (@3))
5557 (IFN_FNMS @0 @1 @2))))
5558
c566cc9f 5559 (simplify
c453ccc2
RS
5560 (IFN_FMS:c (negate @0) @1 @2)
5561 (IFN_FNMS @0 @1 @2))
5562 (simplify
5563 (IFN_FMS @0 @1 (negate @2))
5564 (IFN_FMA @0 @1 @2))
5565 (simplify
5566 (IFN_FMS:c (negate @0) @1 (negate @2))
c566cc9f
RS
5567 (IFN_FNMA @0 @1 @2))
5568 (simplify
c453ccc2
RS
5569 (negate (IFN_FMS@3 @0 @1 @2))
5570 (if (single_use (@3))
5571 (IFN_FNMA @0 @1 @2)))
5572
5573 (simplify
5574 (IFN_FNMA:c (negate @0) @1 @2)
5575 (IFN_FMA @0 @1 @2))
c566cc9f 5576 (simplify
c453ccc2 5577 (IFN_FNMA @0 @1 (negate @2))
c566cc9f
RS
5578 (IFN_FNMS @0 @1 @2))
5579 (simplify
c453ccc2
RS
5580 (IFN_FNMA:c (negate @0) @1 (negate @2))
5581 (IFN_FMS @0 @1 @2))
5582 (simplify
5583 (negate (IFN_FNMA@3 @0 @1 @2))
c566cc9f 5584 (if (single_use (@3))
c453ccc2 5585 (IFN_FMS @0 @1 @2)))
c566cc9f 5586
c453ccc2
RS
5587 (simplify
5588 (IFN_FNMS:c (negate @0) @1 @2)
5589 (IFN_FMS @0 @1 @2))
5590 (simplify
5591 (IFN_FNMS @0 @1 (negate @2))
5592 (IFN_FNMA @0 @1 @2))
5593 (simplify
5594 (IFN_FNMS:c (negate @0) @1 (negate @2))
5595 (IFN_FMA @0 @1 @2))
5596 (simplify
5597 (negate (IFN_FNMS@3 @0 @1 @2))
c566cc9f 5598 (if (single_use (@3))
c453ccc2 5599 (IFN_FMA @0 @1 @2))))
ba6557e2
RS
5600
5601/* POPCOUNT simplifications. */
5602(for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
5603 BUILT_IN_POPCOUNTIMAX)
5604 /* popcount(X&1) is nop_expr(X&1). */
5605 (simplify
5606 (popcount @0)
5607 (if (tree_nonzero_bits (@0) == 1)
5608 (convert @0)))
5609 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
5610 (simplify
5611 (plus (popcount:s @0) (popcount:s @1))
5612 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
5613 (popcount (bit_ior @0 @1))))
5614 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
5615 (for cmp (le eq ne gt)
5616 rep (eq eq ne ne)
5617 (simplify
5618 (cmp (popcount @0) integer_zerop)
5619 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
0d2b3bca
RS
5620
5621/* Simplify:
5622
5623 a = a1 op a2
5624 r = c ? a : b;
5625
5626 to:
5627
5628 r = c ? a1 op a2 : b;
5629
5630 if the target can do it in one go. This makes the operation conditional
5631 on c, so could drop potentially-trapping arithmetic, but that's a valid
cff1a122
JJ
5632 simplification if the result of the operation isn't needed.
5633
c16504f6
LJH
5634 Avoid speculatively generating a stand-alone vector comparison
5635 on targets that might not support them. Any target implementing
5636 conditional internal functions must support the same comparisons
5637 inside and outside a VEC_COND_EXPR. */
cff1a122 5638
ea5212b7 5639#if GIMPLE
0d2b3bca
RS
5640(for uncond_op (UNCOND_BINARY)
5641 cond_op (COND_BINARY)
5642 (simplify
5643 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
cff1a122
JJ
5644 (with { tree op_type = TREE_TYPE (@4); }
5645 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
ea5212b7 5646 && element_precision (type) == element_precision (op_type))
0d2b3bca
RS
5647 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
5648 (simplify
5649 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
cff1a122
JJ
5650 (with { tree op_type = TREE_TYPE (@4); }
5651 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
ea5212b7 5652 && element_precision (type) == element_precision (op_type))
0d2b3bca 5653 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
6a86928d 5654
b41d1f6e
RS
5655/* Same for ternary operations. */
5656(for uncond_op (UNCOND_TERNARY)
5657 cond_op (COND_TERNARY)
5658 (simplify
5659 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
cff1a122
JJ
5660 (with { tree op_type = TREE_TYPE (@5); }
5661 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
ea5212b7 5662 && element_precision (type) == element_precision (op_type))
b41d1f6e
RS
5663 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
5664 (simplify
5665 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
cff1a122
JJ
5666 (with { tree op_type = TREE_TYPE (@5); }
5667 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
ea5212b7 5668 && element_precision (type) == element_precision (op_type))
b41d1f6e
RS
5669 (view_convert (cond_op (bit_not @0) @2 @3 @4
5670 (view_convert:op_type @1)))))))
ea5212b7 5671#endif
b41d1f6e 5672
6a86928d
RS
5673/* Detect cases in which a VEC_COND_EXPR effectively replaces the
5674 "else" value of an IFN_COND_*. */
5675(for cond_op (COND_BINARY)
5676 (simplify
5677 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
5678 (with { tree op_type = TREE_TYPE (@3); }
5679 (if (element_precision (type) == element_precision (op_type))
2c58d42c
RS
5680 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
5681 (simplify
5682 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
5683 (with { tree op_type = TREE_TYPE (@5); }
5684 (if (inverse_conditions_p (@0, @2)
5685 && element_precision (type) == element_precision (op_type))
5686 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
b41d1f6e
RS
5687
5688/* Same for ternary operations. */
5689(for cond_op (COND_TERNARY)
5690 (simplify
5691 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
5692 (with { tree op_type = TREE_TYPE (@4); }
5693 (if (element_precision (type) == element_precision (op_type))
2c58d42c
RS
5694 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
5695 (simplify
5696 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
5697 (with { tree op_type = TREE_TYPE (@6); }
5698 (if (inverse_conditions_p (@0, @2)
5699 && element_precision (type) == element_precision (op_type))
5700 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
a19f98d5
RS
5701
5702/* For pointers @0 and @2 and nonnegative constant offset @1, look for
5703 expressions like:
5704
5705 A: (@0 + @1 < @2) | (@2 + @1 < @0)
5706 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
5707
5708 If pointers are known not to wrap, B checks whether @1 bytes starting
5709 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
5710 bytes. A is more efficiently tested as:
5711
5712 A: (sizetype) (@0 + @1 - @2) > @1 * 2
5713
5714 The equivalent expression for B is given by replacing @1 with @1 - 1:
5715
5716 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
5717
5718 @0 and @2 can be swapped in both expressions without changing the result.
5719
5720 The folds rely on sizetype's being unsigned (which is always true)
5721 and on its being the same width as the pointer (which we have to check).
5722
5723 The fold replaces two pointer_plus expressions, two comparisons and
5724 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
5725 the best case it's a saving of two operations. The A fold retains one
5726 of the original pointer_pluses, so is a win even if both pointer_pluses
5727 are used elsewhere. The B fold is a wash if both pointer_pluses are
5728 used elsewhere, since all we end up doing is replacing a comparison with
5729 a pointer_plus. We do still apply the fold under those circumstances
5730 though, in case applying it to other conditions eventually makes one of the
5731 pointer_pluses dead. */
5732(for ior (truth_orif truth_or bit_ior)
5733 (for cmp (le lt)
5734 (simplify
5735 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
5736 (cmp:cs (pointer_plus@4 @2 @1) @0))
5737 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5738 && TYPE_OVERFLOW_WRAPS (sizetype)
5739 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
5740 /* Calculate the rhs constant. */
5741 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
5742 offset_int rhs = off * 2; }
5743 /* Always fails for negative values. */
5744 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
5745 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
5746 pick a canonical order. This increases the chances of using the
5747 same pointer_plus in multiple checks. */
5748 (with { bool swap_p = tree_swap_operands_p (@0, @2);
5749 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
5750 (if (cmp == LT_EXPR)
5751 (gt (convert:sizetype
5752 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
5753 { swap_p ? @0 : @2; }))
5754 { rhs_tree; })
5755 (gt (convert:sizetype
5756 (pointer_diff:ssizetype
5757 (pointer_plus { swap_p ? @2 : @0; }
5758 { wide_int_to_tree (sizetype, off); })
5759 { swap_p ? @0 : @2; }))
5760 { rhs_tree; })))))))))
f4bf2aab
RS
5761
5762/* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
5763 element of @1. */
5764(for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
5765 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
5766 (with { int i = single_nonzero_element (@1); }
5767 (if (i >= 0)
5768 (with { tree elt = vector_cst_elt (@1, i);
5769 tree elt_type = TREE_TYPE (elt);
5770 unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
5771 tree size = bitsize_int (elt_bits);
5772 tree pos = bitsize_int (elt_bits * i); }
5773 (view_convert
5774 (bit_and:elt_type
5775 (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
5776 { elt; })))))))
ebd733a7
RB
5777
5778(simplify
5779 (vec_perm @0 @1 VECTOR_CST@2)
5780 (with
5781 {
5782 tree op0 = @0, op1 = @1, op2 = @2;
5783
5784 /* Build a vector of integers from the tree mask. */
5785 vec_perm_builder builder;
5786 if (!tree_to_vec_perm_builder (&builder, op2))
5787 return NULL_TREE;
5788
5789 /* Create a vec_perm_indices for the integer vector. */
5790 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
5791 bool single_arg = (op0 == op1);
5792 vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
5793 }
5794 (if (sel.series_p (0, 1, 0, 1))
5795 { op0; }
5796 (if (sel.series_p (0, 1, nelts, 1))
5797 { op1; }
5798 (with
5799 {
5800 if (!single_arg)
5801 {
5802 if (sel.all_from_input_p (0))
5803 op1 = op0;
5804 else if (sel.all_from_input_p (1))
5805 {
5806 op0 = op1;
5807 sel.rotate_inputs (1);
5808 }
4f8b89f0
RB
5809 else if (known_ge (poly_uint64 (sel[0]), nelts))
5810 {
5811 std::swap (op0, op1);
5812 sel.rotate_inputs (1);
5813 }
ebd733a7
RB
5814 }
5815 gassign *def;
5816 tree cop0 = op0, cop1 = op1;
5817 if (TREE_CODE (op0) == SSA_NAME
5818 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
5819 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
5820 cop0 = gimple_assign_rhs1 (def);
5821 if (TREE_CODE (op1) == SSA_NAME
5822 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
5823 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
5824 cop1 = gimple_assign_rhs1 (def);
5825
5826 tree t;
5827 }
5828 (if ((TREE_CODE (cop0) == VECTOR_CST
5829 || TREE_CODE (cop0) == CONSTRUCTOR)
5830 && (TREE_CODE (cop1) == VECTOR_CST
5831 || TREE_CODE (cop1) == CONSTRUCTOR)
5832 && (t = fold_vec_perm (type, cop0, cop1, sel)))
5833 { t; }
5834 (with
5835 {
5836 bool changed = (op0 == op1 && !single_arg);
4f8b89f0
RB
5837 tree ins = NULL_TREE;
5838 unsigned at = 0;
5839
5840 /* See if the permutation is performing a single element
5841 insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
5842 in that case. But only if the vector mode is supported,
5843 otherwise this is invalid GIMPLE. */
5844 if (TYPE_MODE (type) != BLKmode
5845 && (TREE_CODE (cop0) == VECTOR_CST
5846 || TREE_CODE (cop0) == CONSTRUCTOR
5847 || TREE_CODE (cop1) == VECTOR_CST
5848 || TREE_CODE (cop1) == CONSTRUCTOR))
5849 {
5850 if (sel.series_p (1, 1, nelts + 1, 1))
5851 {
5852 /* After canonicalizing the first elt to come from the
5853 first vector we only can insert the first elt from
5854 the first vector. */
5855 at = 0;
cc49641a 5856 if ((ins = fold_read_from_vector (cop0, sel[0])))
00e7f01d 5857 op0 = op1;
4f8b89f0
RB
5858 }
5859 else
5860 {
5861 unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
5862 for (at = 0; at < encoded_nelts; ++at)
5863 if (maybe_ne (sel[at], at))
5864 break;
5865 if (at < encoded_nelts && sel.series_p (at + 1, 1, at + 1, 1))
5866 {
5867 if (known_lt (at, nelts))
5868 ins = fold_read_from_vector (cop0, sel[at]);
5869 else
5870 ins = fold_read_from_vector (cop1, sel[at] - nelts);
5871 }
5872 }
5873 }
ebd733a7
RB
5874
5875 /* Generate a canonical form of the selector. */
4f8b89f0 5876 if (!ins && sel.encoding () != builder)
ebd733a7
RB
5877 {
5878 /* Some targets are deficient and fail to expand a single
5879 argument permutation while still allowing an equivalent
5880 2-argument version. */
5881 tree oldop2 = op2;
5882 if (sel.ninputs () == 2
5883 || can_vec_perm_const_p (TYPE_MODE (type), sel, false))
5884 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
5885 else
5886 {
5887 vec_perm_indices sel2 (builder, 2, nelts);
5888 if (can_vec_perm_const_p (TYPE_MODE (type), sel2, false))
5889 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
5890 else
5891 /* Not directly supported with either encoding,
5892 so use the preferred form. */
5893 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
5894 }
4f8b89f0
RB
5895 if (!operand_equal_p (op2, oldop2, 0))
5896 changed = true;
ebd733a7
RB
5897 }
5898 }
4f8b89f0
RB
5899 (if (ins)
5900 (bit_insert { op0; } { ins; }
5901 { bitsize_int (at * tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)))); })
5902 (if (changed)
5903 (vec_perm { op0; } { op1; } { op2; }))))))))))
21caa1a2
PK
5904
5905/* VEC_PERM_EXPR (v, v, mask) -> v where v contains same element. */
5906
5907(match vec_same_elem_p
5908 @0
5909 (if (uniform_vector_p (@0))))
5910
5911(match vec_same_elem_p
5912 (vec_duplicate @0))
5913
5914(simplify
5915 (vec_perm vec_same_elem_p@0 @0 @1)
5916 @0)