/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2020 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   initializer_each_zero_or_onep
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   uniform_integer_cst_p
   HONOR_NANS
   uniform_vector_p)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
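/* For example, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) above defines the
   operator lists XFLOORF (BUILT_IN_IFLOORF, BUILT_IN_LFLOORF,
   BUILT_IN_LLFLOORF), XFLOOR and XFLOORL, i.e. the int, long and
   long long flavors of floorf, floor and floorl.  */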

/* Binary operations and their associated IFN_COND_* function.  */
(define_operator_list UNCOND_BINARY
  plus minus
  mult trunc_div trunc_mod rdiv
  min max
  bit_and bit_ior bit_xor
  lshift rshift)
(define_operator_list COND_BINARY
  IFN_COND_ADD IFN_COND_SUB
  IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
  IFN_COND_MIN IFN_COND_MAX
  IFN_COND_AND IFN_COND_IOR IFN_COND_XOR
  IFN_COND_SHL IFN_COND_SHR)

/* Same for ternary operations.  */
(define_operator_list UNCOND_TERNARY
  IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
(define_operator_list COND_TERNARY
  IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)

/* With nop_convert? combine convert? and view_convert? in one pattern
   plus conditionalize on tree_nop_conversion_p conversions.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
                   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))

/* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
   ABSU_EXPR returns the unsigned absolute value of its operand, and the
   operand of the ABSU_EXPR will have the corresponding signed type.  */
(simplify (abs (convert @0))
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && element_precision (type) > element_precision (TREE_TYPE (@0)))
  (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
   (convert (absu:utype @0)))))

#if GIMPLE
/* Optimize (X + (X >> (prec - 1))) ^ (X >> (prec - 1)) into abs (X).  */
(simplify
 (bit_xor:c (plus:c @0 (rshift@2 @0 INTEGER_CST@1)) @2)
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && wi::to_widest (@1) == element_precision (TREE_TYPE (@0)) - 1)
  (abs @0)))
#endif
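/* The pattern above is the classic branchless abs sequence: for 32-bit int
   and X = -5, X >> 31 is -1 (all ones), X + (X >> 31) is -6, and -6 ^ -1
   is 5.  For nonnegative X the shift yields 0 and both operations are
   no-ops.  */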

/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
  (simplify
    (op @0 integer_zerop)
    (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Even if fold_real_zero_addition_p can't simplify X + 0.0
   into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
   or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
   if not -frounding-math.  For sNaNs the first operation would raise
   exceptions but turn the result into qNaN, so the second operation
   would not raise it.  */
(for inner_op (plus minus)
 (for outer_op (plus minus)
  (simplify
   (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
   (if (real_zerop (@1)
        && real_zerop (@2)
        && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
    (with { bool inner_plus = ((inner_op == PLUS_EXPR)
                               ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
            bool outer_plus
              = ((outer_op == PLUS_EXPR)
                 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
     (if (outer_plus && !inner_plus)
      (outer_op @0 @2)
      @3))))))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

/* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 }.  */
(simplify
 (mult SSA_NAME@1 SSA_NAME@2)
 (if (INTEGRAL_TYPE_P (type)
      && get_nonzero_bits (@1) == 1
      && get_nonzero_bits (@2) == 1)
  (bit_and @1 @2)))

/* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...},
   unless the target has native support for the former but not the latter.  */
(simplify
 (mult @0 VECTOR_CST@1)
 (if (initializer_each_zero_or_onep (@1)
      && !HONOR_SNANS (type)
      && !HONOR_SIGNED_ZEROS (type))
  (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
   (if (itype
        && (!VECTOR_MODE_P (TYPE_MODE (type))
            || (VECTOR_MODE_P (TYPE_MODE (itype))
                && optab_handler (and_optab,
                                  TYPE_MODE (itype)) != CODE_FOR_nothing)))
    (view_convert (bit_and:itype (view_convert @0)
                                 (ne @1 { build_zero_cst (type); })))))))

(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X).  */
 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X).  */
 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X).  */
 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X).  */
 (simplify
  (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outp @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outp @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outp @0))))))
 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X).  */
 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X).  */
 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X).  */
 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X).  */
 (simplify
  (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outn @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outn @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outn @0)))))))

/* Transform X * copysign (1.0, X) into abs(X).  */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep @0))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X).  */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X).  */
(simplify
 (COPYSIGN_ALL REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN_ALL (negate @0) @1)))
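/* E.g. copysign (-3.0, x) becomes copysign (3.0, x); the sign of the first
   operand is discarded anyway, so canonicalize the magnitude to be
   nonnegative.  */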

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
  (simplify
    (op @0 integer_onep)
    (non_lvalue @0)))

/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / ( 1 << B)) != -1 >> B.
   Also handle widening conversions, like:
   (A / (unsigned long long) (1U << B)) -> (A >> B)
   or
   (A / (unsigned long long) (1 << B)) -> (A >> B).
   If the left shift is signed, it can be done only if the upper bits
   of A starting from shift's type sign bit are zero, as
   (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
   so it is valid only if A >> 31 is zero.  */
(simplify
 (trunc_div @0 (convert? (lshift integer_onep@1 @2)))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
      && (useless_type_conversion_p (type, TREE_TYPE (@1))
          || (element_precision (type) >= element_precision (TREE_TYPE (@1))
              && (TYPE_UNSIGNED (TREE_TYPE (@1))
                  || (element_precision (type)
                      == element_precision (TREE_TYPE (@1)))
                  || (INTEGRAL_TYPE_P (type)
                      && (tree_nonzero_bits (@0)
                          & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
                                      true,
                                      element_precision (type))) == 0)))))
  (rshift @0 @2)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
    { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    wi::overflow_type overflow;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                            TYPE_SIGN (type), &overflow);
   }
   (if (div == EXACT_DIV_EXPR
        || optimize_successive_divisions_p (@2, @3))
    (if (!overflow)
     (div @0 { wide_int_to_tree (type, mul); })
     (if (TYPE_UNSIGNED (type)
          || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
      { build_zero_cst (type); }))))))
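/* E.g. (X / 3) / 5 becomes X / 15.  If the product of the divisors does
   not fit in the type, the double division always yields zero; only the
   ambiguous signed product equal to TYPE_MIN is left alone.  */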

/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   wi::overflow_type overflow;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                           TYPE_SIGN (type), &overflow);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))
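/* E.g. (X * 4) * 8 becomes X * 32.  With wrapping overflow the constants
   can always be combined modulo 2^prec; with undefined overflow an
   overflowing constant product is skipped, as described above.  */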

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ! HONOR_NANS (type)
       && ! HONOR_INFINITIES (type))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C).  */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C.  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

(if (flag_unsafe_math_optimizations)
 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/NaN.
    Since C / x may underflow to zero, do this only for unsafe math.  */
 (for op (lt le gt ge)
      neg_op (gt ge lt le)
  (simplify
   (op (rdiv REAL_CST@0 @1) real_zerop@2)
   (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
    (switch
     (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
      (op @1 @2))
     /* For C < 0, use the inverted operator.  */
     (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
      (neg_op @1 @2)))))))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A).  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
           { build_int_cst (integer_type_node,
                            wi::exact_log2 (wi::to_wide (@2))); }))))
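/* The wi::to_wide (@2) + wi::to_wide (@1) == 0 check ensures the mask is
   exactly -A, so the masked value is a multiple of A and the division is
   exact: e.g. (x & -16) / 16 becomes x >> 4.  */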

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))
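/* E.g. with -freciprocal-math, x / 5.0 becomes x * 0.2.  Without the flag,
   only constants whose inverse is exactly representable are handled via
   exact_inverse, e.g. x / 4.0 becomes x * 0.25.  */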

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
                             TYPE_SIGN (type)))
   { build_zero_cst (type); }))
 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
    modulo and comparison, since it is simpler and equivalent.  */
 (for cmp (eq ne)
  (simplify
   (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
   (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
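/* E.g. for signed x, x % 4 == 0 becomes (unsigned) x % 4 == 0, which in
   turn becomes a mask test: a value is divisible by 4 exactly when its low
   two bits are zero, regardless of sign.  */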

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (wi::to_wide (@1))
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))
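/* E.g. x % -5 becomes x % 5: truncating division rounds toward zero, so
   the remainder always takes the sign of x and the divisor's sign is
   irrelevant.  */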

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
         Y might be -1, because we would then change valid
         INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
          || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
                                                   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).
   Also optimize "A shift (B % C)", if C is a power of 2, to
   "A shift (B & (C - 1))".  The SHIFT operations include "<<" and ">>",
   and (B % C) is assumed nonnegative, as shifting by a negative amount
   would be UB.  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (for shift (lshift rshift)
  (simplify
   (shift @0 (mod @1 (power_of_two_cand@2 @3)))
   (if (integer_pow2p (@3) && tree_int_cst_sgn (@3) > 0)
    (shift @0 (bit_and @1 (minus @2 { build_int_cst (TREE_TYPE (@2),
                                                     1); }))))))
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
        || tree_expr_nonnegative_p (@0))
        && tree_nop_conversion_p (type, TREE_TYPE (@3))
        && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
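/* E.g. for unsigned x, x % 16 becomes x & 15, and x << (b % 8) becomes
   x << (b & 7).  */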

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
                (type, wi::mask (TYPE_PRECISION (type)
                                 - wi::exact_log2 (wi::to_wide (@1)),
                                 false, TYPE_PRECISION (type))); })))
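/* The multiplication discards the top log2 (C) bits, so the division
   recovers only the low prec - log2 (C) bits: e.g. for 32-bit unsigned x,
   (x * 8) / 8 becomes x & 0x1FFFFFFF.  */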

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* Convert absu(x)*absu(x) -> x*x.  */
(simplify
 (mult (absu@1 @0) @1)
 (mult (convert@2 @0) @2))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b).  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
 (if (! FLOAT_TYPE_P (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x).  */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
      && single_use (@1))
  (if (TYPE_UNSIGNED (type))
   (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
 (if (flag_associative_math
      && single_use (@3))
  (with
   { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
   (if (tem)
    (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_and @0 (convert @1)) @2))))

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
 (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
 (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
  (bit_xor @0 @1))
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))

/* PR53979: Transform ((a ^ b) | a) -> (a | b).  */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* (a & ~b) | (a ^ b)  -->  a ^ b  */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a  -->  ~(a & b)  */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (~a & b) ^ a  -->  (a | b)  */
(simplify
 (bit_xor:c (bit_and:cs (bit_not @0) @1) @0)
 (bit_ior @0 @1))

/* (a | b) & ~(a ^ b)  -->  a & b  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b)  -->  a | ~b  */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b)  -->  a | b  */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b)  -->  a | ~b  */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* ~(~a | b) --> a & ~b */
(simplify
 (bit_not (bit_ior:cs (bit_not @0) @1))
 (bit_and @0 (bit_not @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
   ((A & N) + B) & M -> (A + B) & M
   Similarly if (N & M) == 0,
   ((A | N) + B) & M -> (A + B) & M
   and for - instead of + (or unary - instead of +)
   and/or ^ instead of |.
   If B is constant and (B & M) == 0, fold into A & M.  */
(for op (plus minus)
 (for bitop (bit_and bit_ior bit_xor)
  (simplify
   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
    (with
     { tree pmop[2];
       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
                                       @3, @4, @1, ERROR_MARK, NULL_TREE,
                                       NULL_TREE, pmop); }
     (if (utype)
      (convert (bit_and (op (convert:utype { pmop[0]; })
                            (convert:utype { pmop[1]; }))
                        (convert:utype @2))))))
  (simplify
   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
    (with
     { tree pmop[2];
       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                       NULL_TREE, NULL_TREE, @1, bitop, @3,
                                       @4, pmop); }
     (if (utype)
      (convert (bit_and (op (convert:utype { pmop[0]; })
                            (convert:utype { pmop[1]; }))
                        (convert:utype @2)))))))
 (simplify
  (bit_and (op:s @0 @1) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, @1, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2)))))))
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
                                      bitop, @2, @3, NULL_TREE, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (negate (convert:utype { pmop[0]; }))
                       (convert:utype @1)))))))

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))
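/* E.g. for unsigned x and y, x % y < y folds to true and x % y >= y folds
   to false; y == 0 would invoke undefined behavior anyway, so the fold may
   assume y > 0.  */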

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
  (bit_xor @0 @0)
  { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
  (bit_xor @0 integer_all_onesp@1)
  (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif

/* ~(~X - Y) -> X + Y and ~(~X + Y) -> X - Y.  */
(simplify
 (bit_not (minus (bit_not @0) @1))
 (plus @0 @1))
(simplify
 (bit_not (plus:c (bit_not @0) @1))
 (minus @0 @1))

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (~x & y) | ~(x | y) -> ~x */
(simplify
 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
 @2)

/* (x | y) ^ (x | ~y) -> ~x */
(simplify
 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
 (bit_not @0))

/* (x & y) | ~(x | y) -> ~(x ^ y) */
(simplify
 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x ^ y) -> x | ~y */
(simplify
 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
 (bit_ior @0 (bit_not @1)))

/* (x ^ y) | ~(x | y) -> ~(x & y) */
(simplify
 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_and @0 @1)))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - y -> (x & ~y) */
(simplify
 (minus (bit_ior:cs @0 @1) @1)
 (bit_and @0 (bit_not @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* (~x | y) & (x | ~y) -> ~(x ^ y) */
(simplify
 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x | ~y) -> x ^ y */
(simplify
 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
 (bit_xor @0 @1))

/* ((x & y) - (x | y)) - 1 -> ~(x ^ y) */
(simplify
 (plus (nop_convert1? (minus@2 (nop_convert2? (bit_and:c @0 @1))
                               (nop_convert2? (bit_ior @0 @1))))
       integer_all_onesp)
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
      && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
      && !TYPE_SATURATING (TREE_TYPE (@2)))
  (bit_not (convert (bit_xor @0 @1)))))
(simplify
 (minus (nop_convert1? (plus@2 (nop_convert2? (bit_and:c @0 @1))
                               integer_all_onesp))
        (nop_convert3? (bit_ior @0 @1)))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
      && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
      && !TYPE_SATURATING (TREE_TYPE (@2)))
  (bit_not (convert (bit_xor @0 @1)))))
(simplify
 (minus (nop_convert1? (bit_and @0 @1))
        (nop_convert2? (plus@2 (nop_convert3? (bit_ior:c @0 @1))
                               integer_onep)))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
      && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
      && !TYPE_SATURATING (TREE_TYPE (@2)))
  (bit_not (convert (bit_xor @0 @1)))))

/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
   (bit_ior (convert @4) (convert @5)))))

/* (X | Y) ^ X -> Y & ~X */
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:cs @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))

/* Convert abs (abs (X)) into abs (X).
   Also absu (absu (X)) into absu (X).  */
(simplify
 (abs (abs@1 @0))
 @1)

(simplify
 (absu (convert@2 (absu@1 @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
  @1))

/* Convert abs[u] (-X) -> abs[u] (X).  */
(simplify
 (abs (negate @0))
 (abs @0))

(simplify
 (absu (negate @0))
 (absu @0))

/* Convert abs[u] (X) where X is nonnegative -> (X).  */
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

(simplify
 (absu tree_expr_nonnegative_p@0)
 (convert @0))

/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
 (minus @0 @1)
 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))))

/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* -(A - B) -> B - A.  */
(simplify
 (negate (minus @0 @1))
 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))
  (minus @1 @0)))
(simplify
 (negate (pointer_diff @0 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (type))
  (pointer_diff @1 @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
 (plus @0 (negate @1))))

/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert@2 @0) (convert?@3 @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constants (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || !type_has_mode_precision_p (type)
           /* In GIMPLE, getting rid of 2 conversions for one new results
              in smaller IL.  */
           || (GIMPLE
               && TREE_CODE (@1) != INTEGER_CST
               && tree_nop_conversion_p (type, TREE_TYPE (@0))
               && single_use (@2)
               && single_use (@3))))
   (convert (bitop @0 (convert @1)))))
 /* In GIMPLE, getting rid of 2 conversions for one new results
    in smaller IL.  */
 (simplify
  (convert (bitop:cs@2 (nop_convert:s @0) @1))
  (if (GIMPLE
       && TREE_CODE (@1) != INTEGER_CST
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && types_match (type, @0))
   (bitop @0 (convert @1)))))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
  (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (if (!CONSTANT_CLASS_P (@0))
   /* This is the canonical form regardless of whether (bitop @1 @2) can be
      folded to a constant.  */
   (bitop @0 (bitop @1 @2))
   /* In this case we have three constants and (bitop @0 @1) doesn't fold
      to a constant.  This can happen if @0 or @1 is a POLY_INT_CST and if
      the values involved are such that the operation can't be decided at
      compile time.  Try folding one of @0 or @1 with @2 to see whether
      that combination can be decided at compile time.

      Keep the existing form if both folds fail, to avoid endless
      oscillation.  */
   (with { tree cst1 = const_binop (bitop, type, @0, @2); }
    (if (cst1)
     (bitop @1 { cst1; })
     (with { tree cst2 = const_binop (bitop, type, @1, @2); }
      (if (cst2)
       (bitop @0 { cst2; }))))))))
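/* E.g. (x & 12) & 10 becomes x & 8 and (x | 12) | 10 becomes x | 14, since
   the two constants fold at compile time.  */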

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
  (truth_not @0))

(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true.  */
(for op (eq ne)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))

5609420f
RB
1491/* ~~x -> x */
1492(simplify
1493 (bit_not (bit_not @0))
1494 @0)
1495
b14a9c57
RB
1496/* Convert ~ (-A) to A - 1. */
1497(simplify
1498 (bit_not (convert? (negate @0)))
ece46666
MG
1499 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1500 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
8b5ee871 1501 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
b14a9c57 1502
81bd903a
MG
1503/* Convert - (~A) to A + 1. */
1504(simplify
e150da38 1505 (negate (nop_convert? (bit_not @0)))
81bd903a
MG
1506 (plus (view_convert @0) { build_each_one_cst (type); }))
1507
b14a9c57
RB
1508/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1509(simplify
8b5ee871 1510 (bit_not (convert? (minus @0 integer_each_onep)))
ece46666
MG
1511 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1512 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
1513 (convert (negate @0))))
1514(simplify
1515 (bit_not (convert? (plus @0 integer_all_onesp)))
ece46666
MG
1516 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1517 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
1518 (convert (negate @0))))
1519
1520/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
1521(simplify
1522 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1523 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1524 (convert (bit_xor @0 (bit_not @1)))))
1525(simplify
1526 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1527 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1528 (convert (bit_xor @0 @1))))
1529
e268a77b
MG
1530/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
1531(simplify
e150da38 1532 (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1)
1533 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1534 (bit_not (bit_xor (view_convert @0) @1))))
1535
1536/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1537(simplify
1538 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1539 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
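/* Editorial illustration (hypothetical C): this rewrites the classic
   masked-merge idiom with one fewer operation:

     unsigned merge (unsigned x, unsigned y, unsigned m)
     { return (x & ~m) | (y & m); }

   becomes

     unsigned merge (unsigned x, unsigned y, unsigned m)
     { return ((x ^ y) & m) ^ x; }
*/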
1540
1541/* Fold A - (A & B) into ~B & A. */
1542(simplify
1543 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
1544 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1545 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1546 (convert (bit_and (bit_not @1) @0))))
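/* Editorial illustration (hypothetical C): a - (a & b) keeps exactly
   the bits of a that the AND removed, hence

     unsigned f (unsigned a, unsigned b) { return a - (a & b); }

   folds to

     unsigned f (unsigned a, unsigned b) { return ~b & a; }
*/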
1547
1548/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
1549(for cmp (gt lt ge le)
1550(simplify
1551 (mult (convert (cmp @0 @1)) @2)
1552 (if (GIMPLE || !TREE_SIDE_EFFECTS (@2))
1553 (cond (cmp @0 @1) @2 { build_zero_cst (type); }))))
1554
1555/* For integral types with undefined overflow and C != 0 fold
1556 x * C EQ/NE y * C into x EQ/NE y. */
1557(for cmp (eq ne)
1558 (simplify
1559 (cmp (mult:c @0 @1) (mult:c @2 @1))
1560 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1561 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1562 && tree_expr_nonzero_p (@1))
1563 (cmp @0 @2))))
1564
1565/* For integral types with wrapping overflow and C odd fold
1566 x * C EQ/NE y * C into x EQ/NE y. */
1567(for cmp (eq ne)
1568 (simplify
1569 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1570 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1571 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1572 && (TREE_INT_CST_LOW (@1) & 1) != 0)
1573 (cmp @0 @2))))
1574
1575/* For integral types with undefined overflow and C != 0 fold
1576 x * C RELOP y * C into:
1577
1578 x RELOP y for nonnegative C
1579 y RELOP x for negative C */
1580(for cmp (lt gt le ge)
1581 (simplify
1582 (cmp (mult:c @0 @1) (mult:c @2 @1))
1583 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1584 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1585 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1586 (cmp @0 @2)
1587 (if (TREE_CODE (@1) == INTEGER_CST
1588 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
1589 (cmp @2 @0))))))
1590
1591/* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
1592(for cmp (le gt)
1593 icmp (gt le)
1594 (simplify
1595 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1596 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1597 && TYPE_UNSIGNED (TREE_TYPE (@0))
1598 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
1599 && (wi::to_wide (@2)
1600 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
1601 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1602 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
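/* Editorial illustration (hypothetical C, 32-bit int): for unsigned x,

     x - 1U <= 2147483646U    i.e.  x - 1U <= INT_MAX - 1U

   holds exactly for 1 <= x <= INT_MAX, which is the same as

     (int) x > 0
*/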
1603
1604/* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1605(for cmp (simple_comparison)
1606 (simplify
1607 (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2)))
1608 (if (element_precision (@3) >= element_precision (@0)
1609 && types_match (@0, @1))
1610 (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
1611 (if (!TYPE_UNSIGNED (TREE_TYPE (@3)))
1612 (cmp @1 @0)
1613 (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1))
1614 (with
1615 {
1616 tree utype = unsigned_type_for (TREE_TYPE (@0));
1617 }
1618 (cmp (convert:utype @1) (convert:utype @0)))))
1619 (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2))))
1620 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3)))
1621 (cmp @0 @1)
1622 (with
1623 {
1624 tree utype = unsigned_type_for (TREE_TYPE (@0));
1625 }
1626 (cmp (convert:utype @0) (convert:utype @1)))))))))
1627
1628/* X / C1 op C2 into a simple range test. */
1629(for cmp (simple_comparison)
1630 (simplify
1631 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1632 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1633 && integer_nonzerop (@1)
1634 && !TREE_OVERFLOW (@1)
1635 && !TREE_OVERFLOW (@2))
1636 (with { tree lo, hi; bool neg_overflow;
1637 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1638 &neg_overflow); }
1639 (switch
1640 (if (code == LT_EXPR || code == GE_EXPR)
1641 (if (TREE_OVERFLOW (lo))
1642 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1643 (if (code == LT_EXPR)
1644 (lt @0 { lo; })
1645 (ge @0 { lo; }))))
1646 (if (code == LE_EXPR || code == GT_EXPR)
1647 (if (TREE_OVERFLOW (hi))
1648 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1649 (if (code == LE_EXPR)
1650 (le @0 { hi; })
1651 (gt @0 { hi; }))))
1652 (if (!lo && !hi)
1653 { build_int_cst (type, code == NE_EXPR); })
1654 (if (code == EQ_EXPR && !hi)
1655 (ge @0 { lo; }))
1656 (if (code == EQ_EXPR && !lo)
1657 (le @0 { hi; }))
1658 (if (code == NE_EXPR && !hi)
1659 (lt @0 { lo; }))
1660 (if (code == NE_EXPR && !lo)
1661 (gt @0 { hi; }))
1662 (if (GENERIC)
1663 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1664 lo, hi); })
1665 (with
1666 {
1667 tree etype = range_check_type (TREE_TYPE (@0));
1668 if (etype)
1669 {
1670 hi = fold_convert (etype, hi);
1671 lo = fold_convert (etype, lo);
1672 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1673 }
1674 }
1675 (if (etype && hi && !TREE_OVERFLOW (hi))
1676 (if (code == EQ_EXPR)
1677 (le (minus (convert:etype @0) { lo; }) { hi; })
1678 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
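/* Editorial illustration (hypothetical C, unsigned x):

     x / 10 == 3   ->   x - 30 <= 9    (i.e. 30 <= x && x <= 39)
     x / 10 != 3   ->   x - 30 > 9

   i.e. the single-comparison range test built above.  */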
1679
1680/* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1681(for op (lt le ge gt)
1682 (simplify
1683 (op (plus:c @0 @2) (plus:c @1 @2))
1684 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1685 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1686 (op @0 @1))))
1687/* For equality and subtraction, this is also true with wrapping overflow. */
1688(for op (eq ne minus)
1689 (simplify
1690 (op (plus:c @0 @2) (plus:c @1 @2))
1691 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1692 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1693 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1694 (op @0 @1))))
1695
1696/* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1697(for op (lt le ge gt)
1698 (simplify
1699 (op (minus @0 @2) (minus @1 @2))
1700 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1701 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1702 (op @0 @1))))
1703/* For equality and subtraction, this is also true with wrapping overflow. */
1704(for op (eq ne minus)
1705 (simplify
1706 (op (minus @0 @2) (minus @1 @2))
1707 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1708 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1709 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1710 (op @0 @1))))
1711/* And for pointers... */
1712(for op (simple_comparison)
1713 (simplify
1714 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1715 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1716 (op @0 @1))))
1717(simplify
1718 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1719 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1720 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1721 (pointer_diff @0 @1)))
1722
1723/* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1724(for op (lt le ge gt)
1725 (simplify
1726 (op (minus @2 @0) (minus @2 @1))
1727 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1728 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1729 (op @1 @0))))
1730/* For equality and subtraction, this is also true with wrapping overflow. */
1731(for op (eq ne minus)
1732 (simplify
1733 (op (minus @2 @0) (minus @2 @1))
1734 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1735 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1736 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1737 (op @1 @0))))
1738/* And for pointers... */
1739(for op (simple_comparison)
1740 (simplify
1741 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1742 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1743 (op @1 @0))))
1744(simplify
1745 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1746 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1747 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1748 (pointer_diff @1 @0)))
1749
1750/* X + Y < Y is the same as X < 0 when there is no overflow. */
1751(for op (lt le gt ge)
1752 (simplify
1753 (op:c (plus:c@2 @0 @1) @1)
1754 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1755 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1756 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
1757 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1758 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1759/* For equality, this is also true with wrapping overflow. */
1760(for op (eq ne)
1761 (simplify
1762 (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1763 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1764 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1765 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1766 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1767 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1768 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1769 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1770 (simplify
1771 (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1772 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1773 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1774 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1775 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1776
1777/* X - Y < X is the same as Y > 0 when there is no overflow.
1778 For equality, this is also true with wrapping overflow. */
1779(for op (simple_comparison)
1780 (simplify
1781 (op:c @0 (minus@2 @0 @1))
1782 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1783 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1784 || ((op == EQ_EXPR || op == NE_EXPR)
1785 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1786 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1787 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1788
1789/* Transform:
1790 (X / Y) == 0 -> X < Y if X, Y are unsigned.
1791 (X / Y) != 0 -> X >= Y if X, Y are unsigned. */
1792(for cmp (eq ne)
1793 ocmp (lt ge)
1794 (simplify
1795 (cmp (trunc_div @0 @1) integer_zerop)
1796 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
1797 /* Complex ==/!= is allowed, but not </>=. */
1798 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
1799 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
1800 (ocmp @0 @1))))
1801
1802/* X == C - X can never be true if C is odd. */
1803(for cmp (eq ne)
1804 (simplify
1805 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1806 (if (TREE_INT_CST_LOW (@1) & 1)
1807 { constant_boolean_node (cmp == NE_EXPR, type); })))
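/* Editorial note: x == C - x would need 2*x == C, which has no integer
   solution for odd C; e.g. "x == 7 - x" folds to 0 and "x != 7 - x"
   folds to 1.  */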
1808
1809/* Arguments on which one can call get_nonzero_bits to get the bits
1810 possibly set. */
1811(match with_possible_nonzero_bits
1812 INTEGER_CST@0)
1813(match with_possible_nonzero_bits
1814 SSA_NAME@0
1815 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1816/* Slightly extended version, do not make it recursive to keep it cheap. */
1817(match (with_possible_nonzero_bits2 @0)
1818 with_possible_nonzero_bits@0)
1819(match (with_possible_nonzero_bits2 @0)
1820 (bit_and:c with_possible_nonzero_bits@0 @2))
1821
1822/* Same for bits that are known to be set, but we do not have
1823 an equivalent to get_nonzero_bits yet. */
1824(match (with_certain_nonzero_bits2 @0)
1825 INTEGER_CST@0)
1826(match (with_certain_nonzero_bits2 @0)
1827 (bit_ior @1 INTEGER_CST@0))
1828
1829/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1830(for cmp (eq ne)
1831 (simplify
1832 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1833 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
1834 { constant_boolean_node (cmp == NE_EXPR, type); })))
1835
1836/* ((X inner_op C0) outer_op C1)
1837 With X being a tree where value_range has reasoned certain bits to always be
1838 zero throughout its computed value range,
1839 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
1840 where zero_mask has 1's for all bits of X that are sure to be 0
1841 and 0's otherwise.
1842 if (inner_op == '^') C0 &= ~C1;
1843 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
1844 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
1845*/
1846(for inner_op (bit_ior bit_xor)
1847 outer_op (bit_xor bit_ior)
1848(simplify
1849 (outer_op
1850 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1851 (with
1852 {
1853 bool fail = false;
1854 wide_int zero_mask_not;
1855 wide_int C0;
1856 wide_int cst_emit;
1857
1858 if (TREE_CODE (@2) == SSA_NAME)
1859 zero_mask_not = get_nonzero_bits (@2);
1860 else
1861 fail = true;
1862
1863 if (inner_op == BIT_XOR_EXPR)
1864 {
1865 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1866 cst_emit = C0 | wi::to_wide (@1);
1867 }
1868 else
1869 {
1870 C0 = wi::to_wide (@0);
1871 cst_emit = C0 ^ wi::to_wide (@1);
1872 }
1873 }
1874 (if (!fail && (C0 & zero_mask_not) == 0)
1875 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1876 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
1877 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
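/* Editorial illustration (hypothetical): if value-range analysis proves
   x < 16, bits 4 and up of x are zero, so for

     (x | 0x20) ^ 0x03

   C0 = 0x20 lies entirely within the known-zero bits and the expression
   is emitted as the single operation

     x ^ 0x23
*/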
1878
1879/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1880(simplify
1881 (pointer_plus (pointer_plus:s @0 @1) @3)
1882 (pointer_plus @0 (plus @1 @3)))
1883
1884/* Pattern match
1885 tem1 = (long) ptr1;
1886 tem2 = (long) ptr2;
1887 tem3 = tem2 - tem1;
1888 tem4 = (unsigned long) tem3;
1889 tem5 = ptr1 + tem4;
1890 and produce
1891 tem5 = ptr2; */
1892(simplify
1893 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1894 /* Conditionally look through a sign-changing conversion. */
1895 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1896 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1897 || (GENERIC && type == TREE_TYPE (@1))))
1898 @1))
1899(simplify
1900 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
1901 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
1902 (convert @1)))
1903
1904/* Pattern match
1905 tem = (sizetype) ptr;
1906 tem = tem & algn;
1907 tem = -tem;
1908 ... = ptr p+ tem;
1909 and produce the simpler and easier to analyze with respect to alignment
1910 ... = ptr & ~algn; */
1911(simplify
1912 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1913 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
1914 (bit_and @0 { algn; })))
1915
1916/* Try folding difference of addresses. */
1917(simplify
1918 (minus (convert ADDR_EXPR@0) (convert @1))
1919 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1920 (with { poly_int64 diff; }
1921 (if (ptr_difference_const (@0, @1, &diff))
1922 { build_int_cst_type (type, diff); }))))
1923(simplify
1924 (minus (convert @0) (convert ADDR_EXPR@1))
1925 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1926 (with { poly_int64 diff; }
1927 (if (ptr_difference_const (@0, @1, &diff))
1928 { build_int_cst_type (type, diff); }))))
1929(simplify
1930 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1931 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1932 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1933 (with { poly_int64 diff; }
1934 (if (ptr_difference_const (@0, @1, &diff))
1935 { build_int_cst_type (type, diff); }))))
1936(simplify
1937 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1938 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1939 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1940 (with { poly_int64 diff; }
1941 (if (ptr_difference_const (@0, @1, &diff))
1942 { build_int_cst_type (type, diff); }))))
1943
1944/* Canonicalize (T *)(ptr - ptr-cst) to &MEM[ptr + -ptr-cst]. */
1945(simplify
1946 (convert (pointer_diff @0 INTEGER_CST@1))
1947 (if (POINTER_TYPE_P (type))
1948 { build_fold_addr_expr_with_type
1949 (build2 (MEM_REF, char_type_node, @0,
1950 wide_int_to_tree (ptr_type_node, wi::neg (wi::to_wide (@1)))),
1951 type); }))
1952
1953/* If arg0 is derived from the address of an object or function, we may
1954 be able to fold this expression using the object or function's
1955 alignment. */
1956(simplify
1957 (bit_and (convert? @0) INTEGER_CST@1)
1958 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1959 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1960 (with
1961 {
1962 unsigned int align;
1963 unsigned HOST_WIDE_INT bitpos;
1964 get_pointer_alignment_1 (@0, &align, &bitpos);
1965 }
1966 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1967 { wide_int_to_tree (type, (wi::to_wide (@1)
1968 & (bitpos / BITS_PER_UNIT))); }))))
1969
1970(match min_value
1971 INTEGER_CST
1972 (if (INTEGRAL_TYPE_P (type)
1973 && wi::eq_p (wi::to_wide (t), wi::min_value (type)))))
1974
1975(match max_value
1976 INTEGER_CST
1977 (if (INTEGRAL_TYPE_P (type)
1978 && wi::eq_p (wi::to_wide (t), wi::max_value (type)))))
1979
1980/* x > y && x != XXX_MIN --> x > y
1981 x > y && x == XXX_MIN --> false. */
1982(for eqne (eq ne)
1983 (simplify
1984 (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value))
1985 (switch
1986 (if (eqne == EQ_EXPR)
1987 { constant_boolean_node (false, type); })
1988 (if (eqne == NE_EXPR)
1989 @2)
1990 )))
1991
1992/* x < y && x != XXX_MAX --> x < y
1993 x < y && x == XXX_MAX --> false. */
1994(for eqne (eq ne)
1995 (simplify
1996 (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value))
1997 (switch
1998 (if (eqne == EQ_EXPR)
1999 { constant_boolean_node (false, type); })
2000 (if (eqne == NE_EXPR)
2001 @2)
2002 )))
2003
2004/* x <= y && x == XXX_MIN --> x == XXX_MIN. */
2005(simplify
2006 (bit_and:c (le:c @0 @1) (eq@2 @0 min_value))
2007 @2)
2008
2009/* x >= y && x == XXX_MAX --> x == XXX_MAX. */
2010(simplify
2011 (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value))
2012 @2)
2013
2014/* x > y || x != XXX_MIN --> x != XXX_MIN. */
2015(simplify
2016 (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value))
2017 @2)
2018
2019/* x <= y || x != XXX_MIN --> true. */
2020(simplify
2021 (bit_ior:c (le:c @0 @1) (ne @0 min_value))
2022 { constant_boolean_node (true, type); })
2023
2024/* x <= y || x == XXX_MIN --> x <= y. */
2025(simplify
2026 (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value))
2027 @2)
2028
2029/* x < y || x != XXX_MAX --> x != XXX_MAX. */
2030(simplify
2031 (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value))
2032 @2)
2033
2034/* x >= y || x != XXX_MAX --> true
2035 x >= y || x == XXX_MAX --> x >= y. */
2036(for eqne (eq ne)
2037 (simplify
2038 (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value))
2039 (switch
2040 (if (eqne == EQ_EXPR)
2041 @2)
2042 (if (eqne == NE_EXPR)
2043 { constant_boolean_node (true, type); }))))
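/* Editorial illustration (hypothetical C, 32-bit int): since no int is
   below INT_MIN or above INT_MAX,

     x > y && x != INT_MIN    ->   x > y
     x > y && x == INT_MIN    ->   0
     x >= y || x != INT_MAX   ->   1
*/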
2044
2045/* Convert (X == CST1) && (X OP2 CST2) to a known value
2046 based on CST1 OP2 CST2. Similarly for (X != CST1). */
2047
2048(for code1 (eq ne)
2049 (for code2 (eq ne lt gt le ge)
2050 (simplify
2051 (bit_and:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2052 (with
2053 {
2054 int cmp = tree_int_cst_compare (@1, @2);
2055 bool val;
2056 switch (code2)
2057 {
2058 case EQ_EXPR: val = (cmp == 0); break;
2059 case NE_EXPR: val = (cmp != 0); break;
2060 case LT_EXPR: val = (cmp < 0); break;
2061 case GT_EXPR: val = (cmp > 0); break;
2062 case LE_EXPR: val = (cmp <= 0); break;
2063 case GE_EXPR: val = (cmp >= 0); break;
2064 default: gcc_unreachable ();
2065 }
2066 }
2067 (switch
2068 (if (code1 == EQ_EXPR && val) @3)
2069 (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); })
2070 (if (code1 == NE_EXPR && !val) @4))))))
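/* Editorial illustration (hypothetical C):

     x == 3 && x < 5   ->   x == 3   (3 < 5 holds)
     x == 7 && x < 5   ->   0        (7 < 5 fails)
     x != 3 && x > 9   ->   x > 9    (3 > 9 fails)
*/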
2071
2072/* Convert (X OP1 CST1) && (X OP2 CST2). */
2073
2074(for code1 (lt le gt ge)
2075 (for code2 (lt le gt ge)
2076 (simplify
2077 (bit_and (code1:c@3 @0 INTEGER_CST@1) (code2:c@4 @0 INTEGER_CST@2))
2078 (with
2079 {
2080 int cmp = tree_int_cst_compare (@1, @2);
2081 }
2082 (switch
2083 /* Choose the more restrictive of two < or <= comparisons. */
2084 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2085 && (code2 == LT_EXPR || code2 == LE_EXPR))
2086 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2087 @3
2088 @4))
2089 /* Likewise choose the more restrictive of two > or >= comparisons. */
2090 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2091 && (code2 == GT_EXPR || code2 == GE_EXPR))
2092 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2093 @3
2094 @4))
2095 /* Check for singleton ranges. */
2096 (if (cmp == 0
2097 && ((code1 == LE_EXPR && code2 == GE_EXPR)
2098 || (code1 == GE_EXPR && code2 == LE_EXPR)))
2099 (eq @0 @1))
2100 /* Check for disjoint ranges. */
2101 (if (cmp <= 0
2102 && (code1 == LT_EXPR || code1 == LE_EXPR)
2103 && (code2 == GT_EXPR || code2 == GE_EXPR))
2104 { constant_boolean_node (false, type); })
2105 (if (cmp >= 0
2106 && (code1 == GT_EXPR || code1 == GE_EXPR)
2107 && (code2 == LT_EXPR || code2 == LE_EXPR))
2108 { constant_boolean_node (false, type); })
2109 )))))
2110
2111/* Convert (X == CST1) || (X OP2 CST2) to a known value
2112 based on CST1 OP2 CST2. Similarly for (X != CST1). */
2113
2114(for code1 (eq ne)
2115 (for code2 (eq ne lt gt le ge)
2116 (simplify
2117 (bit_ior:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2118 (with
2119 {
2120 int cmp = tree_int_cst_compare (@1, @2);
2121 bool val;
2122 switch (code2)
2123 {
2124 case EQ_EXPR: val = (cmp == 0); break;
2125 case NE_EXPR: val = (cmp != 0); break;
2126 case LT_EXPR: val = (cmp < 0); break;
2127 case GT_EXPR: val = (cmp > 0); break;
2128 case LE_EXPR: val = (cmp <= 0); break;
2129 case GE_EXPR: val = (cmp >= 0); break;
2130 default: gcc_unreachable ();
2131 }
2132 }
2133 (switch
2134 (if (code1 == EQ_EXPR && val) @4)
2135 (if (code1 == NE_EXPR && val) { constant_boolean_node (true, type); })
2136 (if (code1 == NE_EXPR && !val) @3))))))
2137
2138/* Convert (X OP1 CST1) || (X OP2 CST2). */
2139
2140(for code1 (lt le gt ge)
2141 (for code2 (lt le gt ge)
2142 (simplify
2143 (bit_ior (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2144 (with
2145 {
2146 int cmp = tree_int_cst_compare (@1, @2);
2147 }
2148 (switch
2149 /* Choose the more restrictive of two < or <= comparisons. */
2150 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2151 && (code2 == LT_EXPR || code2 == LE_EXPR))
2152 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2153 @4
2154 @3))
2155 /* Likewise choose the more restrictive of two > or >= comparisons. */
2156 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2157 && (code2 == GT_EXPR || code2 == GE_EXPR))
2158 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2159 @4
2160 @3))
2161 /* Check for singleton ranges. */
2162 (if (cmp == 0
2163 && ((code1 == LT_EXPR && code2 == GT_EXPR)
2164 || (code1 == GT_EXPR && code2 == LT_EXPR)))
2165 (ne @0 @2))
2166 /* Check for disjoint ranges. */
2167 (if (cmp >= 0
2168 && (code1 == LT_EXPR || code1 == LE_EXPR)
2169 && (code2 == GT_EXPR || code2 == GE_EXPR))
2170 { constant_boolean_node (true, type); })
2171 (if (cmp <= 0
2172 && (code1 == GT_EXPR || code1 == GE_EXPR)
2173 && (code2 == LT_EXPR || code2 == LE_EXPR))
2174 { constant_boolean_node (true, type); })
2175 )))))
2176
2177/* We can't reassociate at all for saturating types. */
2178(if (!TYPE_SATURATING (type))
2179
2180 /* Contract negates. */
2181 /* A + (-B) -> A - B */
2182 (simplify
2183 (plus:c @0 (convert? (negate @1)))
2184 /* Apply STRIP_NOPS on the negate. */
2185 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2186 && !TYPE_OVERFLOW_SANITIZED (type))
2187 (with
2188 {
2189 tree t1 = type;
2190 if (INTEGRAL_TYPE_P (type)
2191 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2192 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2193 }
2194 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
2195 /* A - (-B) -> A + B */
2196 (simplify
2197 (minus @0 (convert? (negate @1)))
2198 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2199 && !TYPE_OVERFLOW_SANITIZED (type))
2200 (with
2201 {
2202 tree t1 = type;
2203 if (INTEGRAL_TYPE_P (type)
2204 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2205 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2206 }
2207 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
2208 /* -(T)(-A) -> (T)A
2209 Sign-extension is ok except for INT_MIN, which thankfully cannot
2210 happen without overflow. */
2211 (simplify
2212 (negate (convert (negate @1)))
2213 (if (INTEGRAL_TYPE_P (type)
2214 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
2215 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
2216 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2217 && !TYPE_OVERFLOW_SANITIZED (type)
2218 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2219 (convert @1)))
2220 (simplify
2221 (negate (convert negate_expr_p@1))
2222 (if (SCALAR_FLOAT_TYPE_P (type)
2223 && ((DECIMAL_FLOAT_TYPE_P (type)
2224 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
2225 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
2226 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
2227 (convert (negate @1))))
2228 (simplify
2229 (negate (nop_convert? (negate @1)))
2230 (if (!TYPE_OVERFLOW_SANITIZED (type)
2231 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2232 (view_convert @1)))
2233
2234 /* We can't reassociate floating-point unless -fassociative-math
2235 or fixed-point plus or minus because of saturation to +-Inf. */
2236 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
2237 && !FIXED_POINT_TYPE_P (type))
2238
2239 /* Match patterns that allow contracting a plus-minus pair
2240 irrespective of overflow issues. */
2241 /* (A +- B) - A -> +- B */
2242 /* (A +- B) -+ B -> A */
2243 /* A - (A +- B) -> -+ B */
2244 /* A +- (B -+ A) -> +- B */
2245 (simplify
2246 (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0)
2247 (view_convert @1))
2248 (simplify
2249 (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0)
2250 (if (!ANY_INTEGRAL_TYPE_P (type)
2251 || TYPE_OVERFLOW_WRAPS (type))
2252 (negate (view_convert @1))
2253 (view_convert (negate @1))))
2254 (simplify
2255 (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1)
2256 (view_convert @0))
2257 (simplify
2258 (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1)))
2259 (if (!ANY_INTEGRAL_TYPE_P (type)
2260 || TYPE_OVERFLOW_WRAPS (type))
2261 (negate (view_convert @1))
2262 (view_convert (negate @1))))
2263 (simplify
2264 (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1)))
2265 (view_convert @1))
2266 /* (A +- B) + (C - A) -> C +- B */
2267 /* (A + B) - (A - C) -> B + C */
2268 /* More cases are handled with comparisons. */
2269 (simplify
2270 (plus:c (plus:c @0 @1) (minus @2 @0))
2271 (plus @2 @1))
2272 (simplify
2273 (plus:c (minus @0 @1) (minus @2 @0))
2274 (minus @2 @1))
2275 (simplify
2276 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
2277 (if (TYPE_OVERFLOW_UNDEFINED (type)
2278 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
2279 (pointer_diff @2 @1)))
2280 (simplify
2281 (minus (plus:c @0 @1) (minus @0 @2))
2282 (plus @1 @2))
2283
2284 /* (A +- CST1) +- CST2 -> A + CST3
2285 Use view_convert because it is safe for vectors and equivalent for
2286 scalars. */
2287 (for outer_op (plus minus)
2288 (for inner_op (plus minus)
2289 neg_inner_op (minus plus)
2290 (simplify
2291 (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1))
2292 CONSTANT_CLASS_P@2)
2293 /* If one of the types wraps, use that one. */
2294 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2295 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2296 forever if something doesn't simplify into a constant. */
2297 (if (!CONSTANT_CLASS_P (@0))
2298 (if (outer_op == PLUS_EXPR)
2299 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
2300 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
2301 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2302 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2303 (if (outer_op == PLUS_EXPR)
2304 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
2305 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
2306 /* If the constant operation overflows we cannot do the transform
2307 directly as we would introduce undefined overflow, for example
2308 with (a - 1) + INT_MIN. */
2309 (if (types_match (type, @0))
2310 (with { tree cst = const_binop (outer_op == inner_op
2311 ? PLUS_EXPR : MINUS_EXPR,
2312 type, @1, @2); }
2313 (if (cst && !TREE_OVERFLOW (cst))
2314 (inner_op @0 { cst; } )
2315 /* X+INT_MAX+1 is X-INT_MIN. */
2316 (if (INTEGRAL_TYPE_P (type) && cst
2317 && wi::to_wide (cst) == wi::min_value (type))
2318 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
2319 /* Last resort, use some unsigned type. */
2320 (with { tree utype = unsigned_type_for (type); }
2321 (if (utype)
2322 (view_convert (inner_op
2323 (view_convert:utype @0)
2324 (view_convert:utype
2325 { drop_tree_overflow (cst); }))))))))))))))
2326
2327 /* (CST1 - A) +- CST2 -> CST3 - A */
2328 (for outer_op (plus minus)
2329 (simplify
2330 (outer_op (nop_convert? (minus CONSTANT_CLASS_P@1 @0)) CONSTANT_CLASS_P@2)
2331 /* If one of the types wraps, use that one. */
2332 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2333 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2334 forever if something doesn't simplify into a constant. */
2335 (if (!CONSTANT_CLASS_P (@0))
2336 (minus (outer_op (view_convert @1) @2) (view_convert @0)))
2337 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2338 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2339 (view_convert (minus (outer_op @1 (view_convert @2)) @0))
2340 (if (types_match (type, @0))
2341 (with { tree cst = const_binop (outer_op, type, @1, @2); }
2342 (if (cst && !TREE_OVERFLOW (cst))
2343 (minus { cst; } @0))))))))
2344
2345 /* CST1 - (CST2 - A) -> CST3 + A
2346 Use view_convert because it is safe for vectors and equivalent for
2347 scalars. */
2348 (simplify
2349 (minus CONSTANT_CLASS_P@1 (nop_convert? (minus CONSTANT_CLASS_P@2 @0)))
2350 /* If one of the types wraps, use that one. */
2351 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2352 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2353 forever if something doesn't simplify into a constant. */
2354 (if (!CONSTANT_CLASS_P (@0))
2355 (plus (view_convert @0) (minus @1 (view_convert @2))))
2356 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2357 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2358 (view_convert (plus @0 (minus (view_convert @1) @2)))
2359 (if (types_match (type, @0))
2360 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
2361 (if (cst && !TREE_OVERFLOW (cst))
2362 (plus { cst; } @0)))))))
2363
2364/* ((T)(A)) + CST -> (T)(A + CST) */
2365#if GIMPLE
2366 (simplify
2367 (plus (convert SSA_NAME@0) INTEGER_CST@1)
2368 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2369 && TREE_CODE (type) == INTEGER_TYPE
2370 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
2371 && int_fits_type_p (@1, TREE_TYPE (@0)))
2372 /* Perform binary operation inside the cast if the constant fits
2373 and (A + CST)'s range does not overflow. */
2374 (with
2375 {
2376 wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
2377 max_ovf = wi::OVF_OVERFLOW;
2378 tree inner_type = TREE_TYPE (@0);
2379
2380 wide_int w1
2381 = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
2382 TYPE_SIGN (inner_type));
2383
2384 wide_int wmin0, wmax0;
2385 if (get_range_info (@0, &wmin0, &wmax0) == VR_RANGE)
2386 {
2387 wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
2388 wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
2389 }
2390 }
2391 (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
2392 (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
2393 )))
2394#endif
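/* Editorial illustration (hypothetical GIMPLE-level example): if range
   information proves an int i satisfies 0 <= i <= 100, then

     (long) i + 1   ->   (long) (i + 1)

   because i + 1 provably cannot overflow in the narrower type.  */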
2395
2396/* ((T)(A + CST1)) + CST2 -> (T)(A) + (T)CST1 + CST2 */
2397#if GIMPLE
2398 (for op (plus minus)
2399 (simplify
2400 (plus (convert:s (op:s @0 INTEGER_CST@1)) INTEGER_CST@2)
2401 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2402 && TREE_CODE (type) == INTEGER_TYPE
2403 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
2404 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2405 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
2406 && TYPE_OVERFLOW_WRAPS (type))
2407 (plus (convert @0) (op @2 (convert @1))))))
2408#endif
2409
2410/* (T)(A) +- (T)(B) -> (T)(A +- B) only when (A +- B) could be simplified
2411 to a simple value. */
2412#if GIMPLE
2413 (for op (plus minus)
2414 (simplify
2415 (op (convert @0) (convert @1))
2416 (if (INTEGRAL_TYPE_P (type)
2417 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2418 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2419 && types_match (TREE_TYPE (@0), TREE_TYPE (@1))
2420 && !TYPE_OVERFLOW_TRAPS (type)
2421 && !TYPE_OVERFLOW_SANITIZED (type))
2422 (convert (op! @0 @1)))))
2423#endif
2424
2425 /* ~A + A -> -1 */
2426 (simplify
2427 (plus:c (bit_not @0) @0)
2428 (if (!TYPE_OVERFLOW_TRAPS (type))
2429 { build_all_ones_cst (type); }))
2430
2431 /* ~A + 1 -> -A */
2432 (simplify
2433 (plus (convert? (bit_not @0)) integer_each_onep)
2434 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2435 (negate (convert @0))))
2436
2437 /* -A - 1 -> ~A */
2438 (simplify
2439 (minus (convert? (negate @0)) integer_each_onep)
2440 (if (!TYPE_OVERFLOW_TRAPS (type)
2441 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
2442 (bit_not (convert @0))))
2443
2444 /* -1 - A -> ~A */
2445 (simplify
2446 (minus integer_all_onesp @0)
2447 (bit_not @0))
2448
2449 /* (T)(P + A) - (T)P -> (T) A */
2450 (simplify
2451 (minus (convert (plus:c @@0 @1))
2452 (convert? @0))
2453 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2454 /* For integer types, if A has a smaller type
2455 than T the result depends on the possible
2456 overflow in P + A.
2457 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2458 However, if an overflow in P + A would cause
2459 undefined behavior, we can assume that there
2460 is no overflow. */
2461 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2462 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2463 (convert @1)))
2464 (simplify
2465 (minus (convert (pointer_plus @@0 @1))
2466 (convert @0))
2467 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2468 /* For pointer types, if the conversion of A to the
2469 final type requires a sign- or zero-extension,
2470 then we have to punt - it is not defined which
2471 one is correct. */
2472 || (POINTER_TYPE_P (TREE_TYPE (@0))
2473 && TREE_CODE (@1) == INTEGER_CST
2474 && tree_int_cst_sign_bit (@1) == 0))
2475 (convert @1)))
2476 (simplify
2477 (pointer_diff (pointer_plus @@0 @1) @0)
2478 /* The second argument of pointer_plus must be interpreted as signed, and
2479 thus sign-extended if necessary. */
2480 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2481 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2482 second arg is unsigned even when we need to consider it as signed;
2483 we don't want to diagnose overflow here. */
2484 (convert (view_convert:stype @1))))
2485
2486 /* (T)P - (T)(P + A) -> -(T) A */
2487 (simplify
2488 (minus (convert? @0)
2489 (convert (plus:c @@0 @1)))
2490 (if (INTEGRAL_TYPE_P (type)
2491 && TYPE_OVERFLOW_UNDEFINED (type)
2492 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2493 (with { tree utype = unsigned_type_for (type); }
2494 (convert (negate (convert:utype @1))))
2495 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2496 /* For integer types, if A has a smaller type
2497 than T the result depends on the possible
2498 overflow in P + A.
2499 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2500 However, if an overflow in P + A would cause
2501 undefined behavior, we can assume that there
2502 is no overflow. */
2503 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2504 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2505 (negate (convert @1)))))
2506 (simplify
2507 (minus (convert @0)
2508 (convert (pointer_plus @@0 @1)))
2509 (if (INTEGRAL_TYPE_P (type)
2510 && TYPE_OVERFLOW_UNDEFINED (type)
2511 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2512 (with { tree utype = unsigned_type_for (type); }
2513 (convert (negate (convert:utype @1))))
2514 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2515 /* For pointer types, if the conversion of A to the
2516 final type requires a sign- or zero-extension,
2517 then we have to punt - it is not defined which
2518 one is correct. */
2519 || (POINTER_TYPE_P (TREE_TYPE (@0))
2520 && TREE_CODE (@1) == INTEGER_CST
2521 && tree_int_cst_sign_bit (@1) == 0))
2522 (negate (convert @1)))))
2523 (simplify
2524 (pointer_diff @0 (pointer_plus @@0 @1))
2525 /* The second argument of pointer_plus must be interpreted as signed, and
2526 thus sign-extended if necessary. */
2527 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2528 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2529 second arg is unsigned even when we need to consider it as signed;
2530 we don't want to diagnose overflow here. */
2531 (negate (convert (view_convert:stype @1)))))
2532
2533 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
2534 (simplify
2535 (minus (convert (plus:c @@0 @1))
2536 (convert (plus:c @0 @2)))
2537 (if (INTEGRAL_TYPE_P (type)
2538 && TYPE_OVERFLOW_UNDEFINED (type)
2539 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2540 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
2541 (with { tree utype = unsigned_type_for (type); }
2542 (convert (minus (convert:utype @1) (convert:utype @2))))
2543 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2544 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2545 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2546 /* For integer types, if A has a smaller type
2547 than T the result depends on the possible
2548 overflow in P + A.
2549 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2550 However, if an overflow in P + A would cause
2551 undefined behavior, we can assume that there
2552 is no overflow. */
2553 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2554 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2555 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2556 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
2557 (minus (convert @1) (convert @2)))))
2558 (simplify
2559 (minus (convert (pointer_plus @@0 @1))
2560 (convert (pointer_plus @0 @2)))
2561 (if (INTEGRAL_TYPE_P (type)
2562 && TYPE_OVERFLOW_UNDEFINED (type)
2563 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2564 (with { tree utype = unsigned_type_for (type); }
2565 (convert (minus (convert:utype @1) (convert:utype @2))))
2566 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2567 /* For pointer types, if the conversion of A to the
2568 final type requires a sign- or zero-extension,
2569 then we have to punt - it is not defined which
2570 one is correct. */
2571 || (POINTER_TYPE_P (TREE_TYPE (@0))
2572 && TREE_CODE (@1) == INTEGER_CST
2573 && tree_int_cst_sign_bit (@1) == 0
2574 && TREE_CODE (@2) == INTEGER_CST
2575 && tree_int_cst_sign_bit (@2) == 0))
2576 (minus (convert @1) (convert @2)))))
2577 (simplify
2578 (pointer_diff (pointer_plus @0 @2) (pointer_plus @1 @2))
2579 (pointer_diff @0 @1))
2580 (simplify
2581 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2582 /* The second argument of pointer_plus must be interpreted as signed, and
2583 thus sign-extended if necessary. */
2584 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2585 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2586 second arg is unsigned even when we need to consider it as signed;
2587 we don't want to diagnose overflow here. */
2588 (minus (convert (view_convert:stype @1))
2589 (convert (view_convert:stype @2)))))))
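/* Editorial illustration (hypothetical C): the pointer folds above
   correspond to rewrites such as

     (p + i) - p         ->   i
     p - (p + i)         ->   -i
     (p + i) - (p + j)   ->   i - j

   for a pointer p and suitably sized offsets i and j.  */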
2590
2591/* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2592 Modeled after fold_plusminus_mult_expr. */
2593(if (!TYPE_SATURATING (type)
2594 && (!FLOAT_TYPE_P (type) || flag_associative_math))
2595 (for plusminus (plus minus)
2596 (simplify
2597 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2598 (if (!ANY_INTEGRAL_TYPE_P (type)
2599 || TYPE_OVERFLOW_WRAPS (type)
2600 || (INTEGRAL_TYPE_P (type)
2601 && tree_expr_nonzero_p (@0)
2602 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2603 (if (single_use (@3) || single_use (@4))
2604 /* If @1 +- @2 is constant require a hard single-use on either
2605 original operand (but not on both). */
2606 (mult (plusminus @1 @2) @0)
2607#if GIMPLE
2608 (mult! (plusminus @1 @2) @0)
2609#endif
2610 )))
2611 /* We cannot generate constant 1 for fract. */
2612 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2613 (simplify
2614 (plusminus @0 (mult:c@3 @0 @2))
2615 (if ((!ANY_INTEGRAL_TYPE_P (type)
2616 || TYPE_OVERFLOW_WRAPS (type)
2617 /* For @0 + @0*@2 this transformation would introduce UB
2618 (where there was none before) for @0 in [-1,0] and @2 max.
2619 For @0 - @0*@2 this transformation would introduce UB
2620 for @0 0 and @2 in [min,min+1] or @0 -1 and @2 min+1. */
2621 || (INTEGRAL_TYPE_P (type)
2622 && ((tree_expr_nonzero_p (@0)
2623 && expr_not_equal_to (@0,
2624 wi::minus_one (TYPE_PRECISION (type))))
2625 || (plusminus == PLUS_EXPR
2626 ? expr_not_equal_to (@2,
2627 wi::max_value (TYPE_PRECISION (type), SIGNED))
2628 /* Let's ignore the @0 -1 and @2 min case. */
2629 : (expr_not_equal_to (@2,
2630 wi::min_value (TYPE_PRECISION (type), SIGNED))
2631 && expr_not_equal_to (@2,
2632 wi::min_value (TYPE_PRECISION (type), SIGNED)
2633 + 1))))))
2634 && single_use (@3))
2635 (mult (plusminus { build_one_cst (type); } @2) @0)))
2636 (simplify
2637 (plusminus (mult:c@3 @0 @2) @0)
2638 (if ((!ANY_INTEGRAL_TYPE_P (type)
2639 || TYPE_OVERFLOW_WRAPS (type)
2640 /* For @0*@2 + @0 this transformation would introduce UB
2641 (where there was none before) for @0 in [-1,0] and @2 max.
2642 For @0*@2 - @0 this transformation would introduce UB
2643 for @0 0 and @2 min. */
2644 || (INTEGRAL_TYPE_P (type)
2645 && ((tree_expr_nonzero_p (@0)
2646 && (plusminus == MINUS_EXPR
2647 || expr_not_equal_to (@0,
2648 wi::minus_one (TYPE_PRECISION (type)))))
2649 || expr_not_equal_to (@2,
2650 (plusminus == PLUS_EXPR
2651 ? wi::max_value (TYPE_PRECISION (type), SIGNED)
2652 : wi::min_value (TYPE_PRECISION (type), SIGNED))))))
2653 && single_use (@3))
2654 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
2655
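/* Editorial illustration (hypothetical C, assuming wrapping overflow or
   the nonzero/not-minus-one guards above):

     a * c + b * c   ->   (a + b) * c
     x * y + x       ->   x * (y + 1)
     x - x * y       ->   x * (1 - y)
*/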
2656#if GIMPLE
2657/* Canonicalize X + (X << C) into X * (1 + (1 << C)) and
2658 (X << C1) + (X << C2) into X * ((1 << C1) + (1 << C2)). */
2659(simplify
2660 (plus:c @0 (lshift:s @0 INTEGER_CST@1))
2661 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2662 && tree_fits_uhwi_p (@1)
2663 && tree_to_uhwi (@1) < element_precision (type))
2664 (with { tree t = type;
2665 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
2666 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1),
2667 element_precision (type));
2668 w += 1;
2669 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
2670 : t, w);
2671 cst = build_uniform_cst (t, cst); }
2672 (convert (mult (convert:t @0) { cst; })))))
2673(simplify
2674 (plus (lshift:s @0 INTEGER_CST@1) (lshift:s @0 INTEGER_CST@2))
2675 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2676 && tree_fits_uhwi_p (@1)
2677 && tree_to_uhwi (@1) < element_precision (type)
2678 && tree_fits_uhwi_p (@2)
2679 && tree_to_uhwi (@2) < element_precision (type))
2680 (with { tree t = type;
2681 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
2682 unsigned int prec = element_precision (type);
2683 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1), prec);
2684 w += wi::set_bit_in_zero (tree_to_uhwi (@2), prec);
2685 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
2686 : t, w);
2687 cst = build_uniform_cst (t, cst); }
2688 (convert (mult (convert:t @0) { cst; })))))
2689#endif
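/* Editorial illustration (hypothetical C, unsigned x):

     x + (x << 3)          ->   x * 9
     (x << 2) + (x << 4)   ->   x * 20
*/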
2690
2691/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
2692
2693(for minmax (min max FMIN_ALL FMAX_ALL)
2694 (simplify
2695 (minmax @0 @0)
2696 @0))
2697/* min(max(x,y),y) -> y. */
2698(simplify
2699 (min:c (max:c @0 @1) @1)
2700 @1)
2701/* max(min(x,y),y) -> y. */
2702(simplify
2703 (max:c (min:c @0 @1) @1)
2704 @1)
2705/* max(a,-a) -> abs(a). */
2706(simplify
2707 (max:c @0 (negate @0))
2708 (if (TREE_CODE (type) != COMPLEX_TYPE
2709 && (! ANY_INTEGRAL_TYPE_P (type)
2710 || TYPE_OVERFLOW_UNDEFINED (type)))
2711 (abs @0)))
2712/* min(a,-a) -> -abs(a). */
2713(simplify
2714 (min:c @0 (negate @0))
2715 (if (TREE_CODE (type) != COMPLEX_TYPE
2716 && (! ANY_INTEGRAL_TYPE_P (type)
2717 || TYPE_OVERFLOW_UNDEFINED (type)))
2718 (negate (abs @0))))
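/* Editorial note: for a == INT_MIN both -a and abs (a) overflow, which
   is why max (a, -a) -> abs (a) above is restricted to types where that
   overflow is undefined (or to non-integral types).  */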
2719(simplify
2720 (min @0 @1)
2721 (switch
2722 (if (INTEGRAL_TYPE_P (type)
2723 && TYPE_MIN_VALUE (type)
2724 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2725 @1)
2726 (if (INTEGRAL_TYPE_P (type)
2727 && TYPE_MAX_VALUE (type)
2728 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2729 @0)))
2730(simplify
2731 (max @0 @1)
2732 (switch
2733 (if (INTEGRAL_TYPE_P (type)
2734 && TYPE_MAX_VALUE (type)
2735 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2736 @1)
2737 (if (INTEGRAL_TYPE_P (type)
2738 && TYPE_MIN_VALUE (type)
2739 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2740 @0)))
2741
2742/* max (a, a + CST) -> a + CST where CST is positive. */
2743/* max (a, a + CST) -> a where CST is negative. */
2744(simplify
2745 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2746 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2747 (if (tree_int_cst_sgn (@1) > 0)
2748 @2
2749 @0)))
2750
2751/* min (a, a + CST) -> a where CST is positive. */
2752/* min (a, a + CST) -> a + CST where CST is negative. */
2753(simplify
2754 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2755 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2756 (if (tree_int_cst_sgn (@1) > 0)
2757 @0
2758 @2)))
2759
2760/* (convert (minmax (convert x) c)) -> minmax (x c) if x is promoted
2761 and the outer convert demotes the expression back to x's type. */
2762(for minmax (min max)
2763 (simplify
2764 (convert (minmax@0 (convert @1) INTEGER_CST@2))
2765 (if (INTEGRAL_TYPE_P (type)
2766 && types_match (@1, type) && int_fits_type_p (@2, type)
2767 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2768 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2769 (minmax @1 (convert @2)))))
2770
2771(for minmax (FMIN_ALL FMAX_ALL)
2772 /* If either argument is NaN, return the other one. Avoid the
2773 transformation if we get (and honor) a signalling NaN. */
2774 (simplify
2775 (minmax:c @0 REAL_CST@1)
2776 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2777 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2778 @0)))
2779/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2780 functions to return the numeric arg if the other one is NaN.
2781 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2782 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2783 worry about it either. */
2784(if (flag_finite_math_only)
2785 (simplify
2786 (FMIN_ALL @0 @1)
2787 (min @0 @1))
2788 (simplify
2789 (FMAX_ALL @0 @1)
2790 (max @0 @1)))
2791/* min (-A, -B) -> -max (A, B) */
2792(for minmax (min max FMIN_ALL FMAX_ALL)
2793 maxmin (max min FMAX_ALL FMIN_ALL)
2794 (simplify
2795 (minmax (negate:s@2 @0) (negate:s@3 @1))
2796 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2797 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2798 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2799 (negate (maxmin @0 @1)))))
2800/* MIN (~X, ~Y) -> ~MAX (X, Y)
2801 MAX (~X, ~Y) -> ~MIN (X, Y) */
2802(for minmax (min max)
2803 maxmin (max min)
2804 (simplify
2805 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2806 (bit_not (maxmin @0 @1))))
2807
2808/* MIN (X, Y) == X -> X <= Y */
2809(for minmax (min min max max)
2810 cmp (eq ne eq ne )
2811 out (le gt ge lt )
2812 (simplify
2813 (cmp:c (minmax:c @0 @1) @0)
2814 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2815 (out @0 @1))))
2816/* MIN (X, 5) == 0 -> X == 0
2817 MIN (X, 5) == 7 -> false */
2818(for cmp (eq ne)
2819 (simplify
2820 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2821 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2822 TYPE_SIGN (TREE_TYPE (@0))))
2823 { constant_boolean_node (cmp == NE_EXPR, type); }
2824 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2825 TYPE_SIGN (TREE_TYPE (@0))))
2826 (cmp @0 @2)))))
2827(for cmp (eq ne)
2828 (simplify
2829 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2830 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2831 TYPE_SIGN (TREE_TYPE (@0))))
2832 { constant_boolean_node (cmp == NE_EXPR, type); }
2833 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2834 TYPE_SIGN (TREE_TYPE (@0))))
2835 (cmp @0 @2)))))
2836/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2837(for minmax (min min max max min min max max )
2838 cmp (lt le gt ge gt ge lt le )
2839 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2840 (simplify
2841 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2842 (comb (cmp @0 @2) (cmp @1 @2))))
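/* Editorial illustration (hypothetical C):

     min (x, 5) == 0   ->   x == 0          (0 < 5)
     min (x, 5) == 7   ->   0               (7 > 5)
     min (x, 5) < 3    ->   x < 3 || 5 < 3, i.e. x < 3
*/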
2843
2844/* Undo fancy way of writing max/min or other ?: expressions,
2845 like a - ((a - b) & -(a < b)), in this case into (a < b) ? b : a.
2846 People normally use ?: and that is what we actually try to optimize. */
2847(for cmp (simple_comparison)
2848 (simplify
2849 (minus @0 (bit_and:c (minus @0 @1)
2850 (convert? (negate@4 (convert? (cmp@5 @2 @3))))))
2851 (if (INTEGRAL_TYPE_P (type)
2852 && INTEGRAL_TYPE_P (TREE_TYPE (@4))
2853 && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE
2854 && INTEGRAL_TYPE_P (TREE_TYPE (@5))
2855 && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type)
2856 || !TYPE_UNSIGNED (TREE_TYPE (@4)))
2857 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
2858 (cond (cmp @2 @3) @1 @0)))
2859 (simplify
2860 (plus:c @0 (bit_and:c (minus @1 @0)
2861 (convert? (negate@4 (convert? (cmp@5 @2 @3))))))
2862 (if (INTEGRAL_TYPE_P (type)
2863 && INTEGRAL_TYPE_P (TREE_TYPE (@4))
2864 && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE
2865 && INTEGRAL_TYPE_P (TREE_TYPE (@5))
2866 && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type)
2867 || !TYPE_UNSIGNED (TREE_TYPE (@4)))
2868 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
2869 (cond (cmp @2 @3) @1 @0)))
2870 /* Similarly with ^ instead of - though in that case with :c. */
2871 (simplify
2872 (bit_xor:c @0 (bit_and:c (bit_xor:c @0 @1)
2873 (convert? (negate@4 (convert? (cmp@5 @2 @3))))))
2874 (if (INTEGRAL_TYPE_P (type)
2875 && INTEGRAL_TYPE_P (TREE_TYPE (@4))
2876 && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE
2877 && INTEGRAL_TYPE_P (TREE_TYPE (@5))
2878 && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type)
2879 || !TYPE_UNSIGNED (TREE_TYPE (@4)))
2880 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
2881 (cond (cmp @2 @3) @1 @0))))
2882
2883/* Simplifications of shift and rotates. */
2884
2885(for rotate (lrotate rrotate)
2886 (simplify
2887 (rotate integer_all_onesp@0 @1)
2888 @0))
2889
2890/* Optimize -1 >> x for arithmetic right shifts. */
2891(simplify
2892 (rshift integer_all_onesp@0 @1)
2893 (if (!TYPE_UNSIGNED (type)
2894 && tree_expr_nonnegative_p (@1))
2895 @0))
2896
2897/* Optimize (x >> c) << c into x & (-1<<c). */
2898(simplify
2899 (lshift (nop_convert? (rshift @0 INTEGER_CST@1)) @1)
2900 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2901 /* It doesn't matter if the right shift is arithmetic or logical. */
2902 (bit_and (view_convert @0) (lshift { build_minus_one_cst (type); } @1))))
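/* Editorial illustration (hypothetical C, 32-bit unsigned x):

     (x >> 4) << 4   ->   x & 0xfffffff0   i.e. x & (-1 << 4)

   clearing the low bits without the round trip through two shifts.  */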
2903
2904(simplify
2905 (lshift (convert (convert@2 (rshift @0 INTEGER_CST@1))) @1)
2906 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type))
2907 /* Allow intermediate conversion to integral type with whatever sign, as
2908 long as the low TYPE_PRECISION (type)
2909 - TYPE_PRECISION (TREE_TYPE (@2)) bits are preserved. */
2910 && INTEGRAL_TYPE_P (type)
2911 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2912 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2913 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
2914 && (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (type)
2915 || wi::geu_p (wi::to_wide (@1),
2916 TYPE_PRECISION (type)
2917 - TYPE_PRECISION (TREE_TYPE (@2)))))
2918 (bit_and (convert @0) (lshift { build_minus_one_cst (type); } @1))))
2919
2920/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2921 types. */
2922(simplify
2923 (rshift (lshift @0 INTEGER_CST@1) @1)
2924 (if (TYPE_UNSIGNED (type)
2925 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2926 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
2927
2928(for shiftrotate (lrotate rrotate lshift rshift)
2929 (simplify
2930 (shiftrotate @0 integer_zerop)
2931 (non_lvalue @0))
2932 (simplify
2933 (shiftrotate integer_zerop@0 @1)
2934 @0)
2935 /* Prefer vector1 << scalar to vector1 << vector2
2936 if vector2 is uniform. */
2937 (for vec (VECTOR_CST CONSTRUCTOR)
2938 (simplify
2939 (shiftrotate @0 vec@1)
2940 (with { tree tem = uniform_vector_p (@1); }
2941 (if (tem)
2942 (shiftrotate @0 { tem; }))))))
2943
2944/* Simplify X << Y to X when Y's low "width" bits are known to be zero,
2945 as the only valid shift amount Y is then 0. Similarly for X >> Y. */
2946#if GIMPLE
2947(for shift (lshift rshift)
2948 (simplify
2949 (shift @0 SSA_NAME@1)
2950 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2951 (with {
2952 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2953 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2954 }
2955 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2956 @0)))))
2957#endif
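/* Editorial illustration (hypothetical GIMPLE-level example, 32-bit x):
   valid shift counts are 0..31, so if bit tracking proves the low five
   bits of y are zero (say y = z & ~31), then x << y must have y == 0
   and simplifies to x.  */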
2958
a7f24614
RB
2959/* Rewrite an LROTATE_EXPR by a constant into an
2960 RROTATE_EXPR by a new constant. */
2961(simplify
2962 (lrotate @0 INTEGER_CST@1)
2963 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2964 build_int_cst (TREE_TYPE (@1),
2965 element_precision (type)), @1); }))
2966
2967/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2968(for op (lrotate rrotate rshift lshift)
2969 (simplify
2970 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2971 (with { unsigned int prec = element_precision (type); }
2972 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2973 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2974 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2975 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2976 (with { unsigned int low = (tree_to_uhwi (@1)
2977 + tree_to_uhwi (@2)); }
2978 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2979 being well defined. */
2980 (if (low >= prec)
2981 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2982 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2983 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2984 { build_zero_cst (type); }
2985 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2986 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
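/* Editorial illustration (hypothetical C, 32-bit unsigned x):

     (x rotl 20) rotl 20   ->   x rotl 8   (40 % 32)
     (x << 20) << 20       ->   0          (40 >= 32)
     (x << 3) << 4         ->   x << 7
*/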
2987
2988
2989/* ((1 << A) & 1) != 0 -> A == 0
2990 ((1 << A) & 1) == 0 -> A != 0 */
2991(for cmp (ne eq)
2992 icmp (eq ne)
2993 (simplify
2994 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2995 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
cc7b5acf 2996
2997/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2998 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2999 if CST2 != 0. */
3000(for cmp (ne eq)
3001 (simplify
3002 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
8e6cdc90 3003 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
3004 (if (cand < 0
3005 || (!integer_zerop (@2)
8e6cdc90 3006 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
3007 { constant_boolean_node (cmp == NE_EXPR, type); }
3008 (if (!integer_zerop (@2)
8e6cdc90 3009 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
8fdc6c67 3010 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
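/* E.g. (4 << a) == 32 becomes a == 3, since ctz (32) - ctz (4) is 3
   and 4 << 3 == 32, while (4 << a) == 6 folds to false because no
   shift count can match.  */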
f2e609c3 3011
3012/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
3013 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
3014 if the new mask might be further optimized. */
3015(for shift (lshift rshift)
3016 (simplify
3017 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
3018 INTEGER_CST@2)
3019 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
3020 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
3021 && tree_fits_uhwi_p (@1)
3022 && tree_to_uhwi (@1) > 0
3023 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
3024 (with
3025 {
3026 unsigned int shiftc = tree_to_uhwi (@1);
3027 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
3028 unsigned HOST_WIDE_INT newmask, zerobits = 0;
3029 tree shift_type = TREE_TYPE (@3);
3030 unsigned int prec;
3031
3032 if (shift == LSHIFT_EXPR)
fecfbfa4 3033 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
1ffbaa3f 3034 else if (shift == RSHIFT_EXPR
2be65d9e 3035 && type_has_mode_precision_p (shift_type))
3036 {
3037 prec = TYPE_PRECISION (TREE_TYPE (@3));
3038 tree arg00 = @0;
3039 /* See if more bits can be proven as zero because of
3040 zero extension. */
3041 if (@3 != @0
3042 && TYPE_UNSIGNED (TREE_TYPE (@0)))
3043 {
3044 tree inner_type = TREE_TYPE (@0);
2be65d9e 3045 if (type_has_mode_precision_p (inner_type)
3046 && TYPE_PRECISION (inner_type) < prec)
3047 {
3048 prec = TYPE_PRECISION (inner_type);
3049 /* See if we can shorten the right shift. */
3050 if (shiftc < prec)
3051 shift_type = inner_type;
3052 /* Otherwise X >> C1 is all zeros, so we'll optimize
3053 it into (X, 0) later on by making sure zerobits
3054 is all ones. */
3055 }
3056 }
dd4786fe 3057 zerobits = HOST_WIDE_INT_M1U;
3058 if (shiftc < prec)
3059 {
3060 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
3061 zerobits <<= prec - shiftc;
3062 }
3063 /* For an arithmetic shift, if the sign bit could be set, zerobits
3064 can actually contain sign bits, so no transformation is
3065 possible, unless MASK masks them all away.  In that
3066 case the shift needs to be converted into a logical shift.  */
3067 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
3068 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
3069 {
3070 if ((mask & zerobits) == 0)
3071 shift_type = unsigned_type_for (TREE_TYPE (@3));
3072 else
3073 zerobits = 0;
3074 }
3075 }
3076 }
3077 /* ((X << 16) & 0xff00) is (X, 0). */
3078 (if ((mask & zerobits) == mask)
3079 { build_int_cst (type, 0); }
3080 (with { newmask = mask | zerobits; }
3081 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
3082 (with
3083 {
3084 /* Only do the transformation if NEWMASK is some integer
3085 mode's mask. */
3086 for (prec = BITS_PER_UNIT;
3087 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
fecfbfa4 3088 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
3089 break;
3090 }
3091 (if (prec < HOST_BITS_PER_WIDE_INT
dd4786fe 3092 || newmask == HOST_WIDE_INT_M1U)
3093 (with
3094 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
3095 (if (!tree_int_cst_equal (newmaskt, @2))
3096 (if (shift_type != TREE_TYPE (@3))
3097 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
3098 (bit_and @4 { newmaskt; })))))))))))))
1ffbaa3f 3099
3100/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
3101 and (X {&,^,|} C2) >> C1 into (X >> C1) {&,^,|} (C2 >> C1).  */
98e30e51 3102(for shift (lshift rshift)
3103 (for bit_op (bit_and bit_xor bit_ior)
3104 (simplify
3105 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
3106 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
3107 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
3108 (bit_op (shift (convert @0) @1) { mask; }))))))
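/* E.g. (x & 0xf0) >> 4 becomes (x >> 4) & 0xf, exposing the shifted
   constant to further folding.  */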
98e30e51 3109
3110/* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
3111(simplify
3112 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
3113 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
3114 && (element_precision (TREE_TYPE (@0))
3115 <= element_precision (TREE_TYPE (@1))
3116 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
3117 (with
3118 { tree shift_type = TREE_TYPE (@0); }
3119 (convert (rshift (convert:shift_type @1) @2)))))
3120
3121/* ~(~X >>r Y) -> X >>r Y
3122 ~(~X <<r Y) -> X <<r Y */
3123(for rotate (lrotate rrotate)
3124 (simplify
3125 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
3126 (if ((element_precision (TREE_TYPE (@0))
3127 <= element_precision (TREE_TYPE (@1))
3128 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
3129 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
3130 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
3131 (with
3132 { tree rotate_type = TREE_TYPE (@0); }
3133 (convert (rotate (convert:rotate_type @1) @2))))))
98e30e51 3134
3135/* Simplifications of conversions. */
3136
3137/* Basic strip-useless-type-conversions / strip_nops. */
f3582e54 3138(for cvt (convert view_convert float fix_trunc)
3139 (simplify
3140 (cvt @0)
3141 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
3142 || (GENERIC && type == TREE_TYPE (@0)))
3143 @0)))
3144
3145/* Contract view-conversions. */
3146(simplify
3147 (view_convert (view_convert @0))
3148 (view_convert @0))
3149
3150/* For integral conversions with the same precision or pointer
3151 conversions use a NOP_EXPR instead. */
3152(simplify
3153 (view_convert @0)
3154 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3155 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3156 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
3157 (convert @0)))
3158
3159/* Strip inner integral conversions that do not change precision or size, or
3160 zero-extend while keeping the same size (for bool-to-char). */
3161(simplify
3162 (view_convert (convert@0 @1))
3163 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3164 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3165 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
3166 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
3167 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
3168 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
3169 (view_convert @1)))
3170
3171/* Simplify a view-converted empty constructor. */
3172(simplify
3173 (view_convert CONSTRUCTOR@0)
3174 (if (TREE_CODE (@0) != SSA_NAME
3175 && CONSTRUCTOR_NELTS (@0) == 0)
3176 { build_zero_cst (type); }))
3177
3178/* Re-association barriers around constants and other re-association
3179 barriers can be removed. */
3180(simplify
3181 (paren CONSTANT_CLASS_P@0)
3182 @0)
3183(simplify
3184 (paren (paren@1 @0))
3185 @1)
3186
3187/* Handle cases of two conversions in a row. */
3188(for ocvt (convert float fix_trunc)
3189 (for icvt (convert float)
3190 (simplify
3191 (ocvt (icvt@1 @0))
3192 (with
3193 {
3194 tree inside_type = TREE_TYPE (@0);
3195 tree inter_type = TREE_TYPE (@1);
3196 int inside_int = INTEGRAL_TYPE_P (inside_type);
3197 int inside_ptr = POINTER_TYPE_P (inside_type);
3198 int inside_float = FLOAT_TYPE_P (inside_type);
09240451 3199 int inside_vec = VECTOR_TYPE_P (inside_type);
3200 unsigned int inside_prec = TYPE_PRECISION (inside_type);
3201 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
3202 int inter_int = INTEGRAL_TYPE_P (inter_type);
3203 int inter_ptr = POINTER_TYPE_P (inter_type);
3204 int inter_float = FLOAT_TYPE_P (inter_type);
09240451 3205 int inter_vec = VECTOR_TYPE_P (inter_type);
3206 unsigned int inter_prec = TYPE_PRECISION (inter_type);
3207 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
3208 int final_int = INTEGRAL_TYPE_P (type);
3209 int final_ptr = POINTER_TYPE_P (type);
3210 int final_float = FLOAT_TYPE_P (type);
09240451 3211 int final_vec = VECTOR_TYPE_P (type);
3212 unsigned int final_prec = TYPE_PRECISION (type);
3213 int final_unsignedp = TYPE_UNSIGNED (type);
3214 }
3215 (switch
3216 /* In addition to the cases of two conversions in a row
3217 handled below, if we are converting something to its own
3218 type via an object of identical or wider precision, neither
3219 conversion is needed. */
3220 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
3221 || (GENERIC
3222 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
3223 && (((inter_int || inter_ptr) && final_int)
3224 || (inter_float && final_float))
3225 && inter_prec >= final_prec)
3226 (ocvt @0))
3227
3228 /* Likewise, if the intermediate and initial types are either both
3229 float or both integer, we don't need the middle conversion if the
3230 former is wider than the latter and doesn't change the signedness
3231 (for integers). Avoid this if the final type is a pointer since
36088299 3232 then we sometimes need the middle conversion. */
3233 (if (((inter_int && inside_int) || (inter_float && inside_float))
3234 && (final_int || final_float)
3235 && inter_prec >= inside_prec
36088299 3236 && (inter_float || inter_unsignedp == inside_unsignedp))
3237 (ocvt @0))
3238
3239 /* If we have a sign-extension of a zero-extended value, we can
3240 replace that by a single zero-extension. Likewise if the
3241 final conversion does not change precision we can drop the
3242 intermediate conversion. */
3243 (if (inside_int && inter_int && final_int
3244 && ((inside_prec < inter_prec && inter_prec < final_prec
3245 && inside_unsignedp && !inter_unsignedp)
3246 || final_prec == inter_prec))
3247 (ocvt @0))
3248
3249 /* Two conversions in a row are not needed unless:
3250 - some conversion is floating-point (overstrict for now), or
3251 - some conversion is a vector (overstrict for now), or
3252 - the intermediate type is narrower than both initial and
3253 final, or
3254 - the intermediate type and innermost type differ in signedness,
3255 and the outermost type is wider than the intermediate, or
3256 - the initial type is a pointer type and the precisions of the
3257 intermediate and final types differ, or
3258 - the final type is a pointer type and the precisions of the
3259 initial and intermediate types differ. */
3260 (if (! inside_float && ! inter_float && ! final_float
3261 && ! inside_vec && ! inter_vec && ! final_vec
3262 && (inter_prec >= inside_prec || inter_prec >= final_prec)
3263 && ! (inside_int && inter_int
3264 && inter_unsignedp != inside_unsignedp
3265 && inter_prec < final_prec)
3266 && ((inter_unsignedp && inter_prec > inside_prec)
3267 == (final_unsignedp && final_prec > inter_prec))
3268 && ! (inside_ptr && inter_prec != final_prec)
36088299 3269 && ! (final_ptr && inside_prec != inter_prec))
3270 (ocvt @0))
3271
3272 /* A truncation to an unsigned type (a zero-extension) should be
3273 canonicalized as bitwise and of a mask. */
3274 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
3275 && final_int && inter_int && inside_int
3276 && final_prec == inside_prec
3277 && final_prec > inter_prec
3278 && inter_unsignedp)
3279 (convert (bit_and @0 { wide_int_to_tree
3280 (inside_type,
3281 wi::mask (inter_prec, false,
3282 TYPE_PRECISION (inside_type))); })))
3283
3284 /* If we are converting an integer to a floating-point that can
3285 represent it exactly and back to an integer, we can skip the
3286 floating-point conversion. */
3287 (if (GIMPLE /* PR66211 */
3288 && inside_int && inter_float && final_int &&
3289 (unsigned) significand_size (TYPE_MODE (inter_type))
3290 >= inside_prec - !inside_unsignedp)
3291 (convert @0)))))))
3292
3293/* If we have a narrowing conversion to an integral type that is fed by a
3294 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
3295 masks off bits outside the final type (and nothing else). */
3296(simplify
3297 (convert (bit_and @0 INTEGER_CST@1))
3298 (if (INTEGRAL_TYPE_P (type)
3299 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3300 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
3301 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
3302 TYPE_PRECISION (type)), 0))
3303 (convert @0)))
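/* E.g. for a 32-bit int x, (unsigned char) (x & 0xff) becomes
   (unsigned char) x, since the mask only clears bits the narrowing
   conversion discards anyway.  */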
3304
3305
3306/* (X /[ex] A) * A -> X. */
3307(simplify
3308 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
3309 (convert @0))
eaeba53a 3310
3311/* Simplify (A / B) * B + (A % B) -> A. */
3312(for div (trunc_div ceil_div floor_div round_div)
3313 mod (trunc_mod ceil_mod floor_mod round_mod)
3314 (simplify
3315 (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
3316 @0))
3317
3318/* ((X /[ex] A) +- B) * A --> X +- A * B. */
3319(for op (plus minus)
3320 (simplify
3321 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
3322 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
3323 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
3324 (with
3325 {
3326 wi::overflow_type overflow;
3327 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
3328 TYPE_SIGN (type), &overflow);
3329 }
3330 (if (types_match (type, TREE_TYPE (@2))
3331 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
3332 (op @0 { wide_int_to_tree (type, mul); })
3333 (with { tree utype = unsigned_type_for (type); }
3334 (convert (op (convert:utype @0)
3335 (mult (convert:utype @1) (convert:utype @2))))))))))
3336
3337/* Canonicalization of binary operations. */
3338
3339/* Convert X + -C into X - C. */
3340(simplify
3341 (plus @0 REAL_CST@1)
3342 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
23f27839 3343 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
3344 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
3345 (minus @0 { tem; })))))
3346
6b6aa8d3 3347/* Convert x+x into x*2. */
3348(simplify
3349 (plus @0 @0)
3350 (if (SCALAR_FLOAT_TYPE_P (type))
3351 (mult @0 { build_real (type, dconst2); })
3352 (if (INTEGRAL_TYPE_P (type))
3353 (mult @0 { build_int_cst (type, 2); }))))
a7f24614 3354
406520e2 3355/* 0 - X -> -X. */
3356(simplify
3357 (minus integer_zerop @1)
3358 (negate @1))
3359(simplify
3360 (pointer_diff integer_zerop @1)
3361 (negate (convert @1)))
3362
3363/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
3364 ARG0 is zero and X + ARG0 reduces to X, since that would mean
3365 (-ARG1 + ARG0) reduces to -ARG1. */
3366(simplify
3367 (minus real_zerop@0 @1)
3368 (if (fold_real_zero_addition_p (type, @0, 0))
3369 (negate @1)))
3370
3371/* Transform x * -1 into -x. */
3372(simplify
3373 (mult @0 integer_minus_onep)
3374 (negate @0))
eaeba53a 3375
3376/* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
3377 signed overflow for CST != 0 && CST != -1. */
3378(simplify
b46ebc6c 3379 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
b771c609 3380 (if (TREE_CODE (@2) != INTEGER_CST
b46ebc6c 3381 && single_use (@3)
3382 && !integer_zerop (@1) && !integer_minus_onep (@1))
3383 (mult (mult @0 @2) @1)))
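/* E.g. (x * 4) * y is reassociated to (x * y) * 4; the guard above
   rejects CST 0 and -1, for which the transform could introduce
   signed overflow.  */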
3384
3385/* True if we can easily extract the real and imaginary parts of a complex
3386 number. */
3387(match compositional_complex
3388 (convert? (complex @0 @1)))
3389
3390/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
3391(simplify
3392 (complex (realpart @0) (imagpart @0))
3393 @0)
3394(simplify
3395 (realpart (complex @0 @1))
3396 @0)
3397(simplify
3398 (imagpart (complex @0 @1))
3399 @1)
83633539 3400
3401/* Sometimes we only care about half of a complex expression. */
3402(simplify
3403 (realpart (convert?:s (conj:s @0)))
3404 (convert (realpart @0)))
3405(simplify
3406 (imagpart (convert?:s (conj:s @0)))
3407 (convert (negate (imagpart @0))))
3408(for part (realpart imagpart)
3409 (for op (plus minus)
3410 (simplify
3411 (part (convert?:s@2 (op:s @0 @1)))
3412 (convert (op (part @0) (part @1))))))
3413(simplify
3414 (realpart (convert?:s (CEXPI:s @0)))
3415 (convert (COS @0)))
3416(simplify
3417 (imagpart (convert?:s (CEXPI:s @0)))
3418 (convert (SIN @0)))
3419
3420/* conj(conj(x)) -> x */
3421(simplify
3422 (conj (convert? (conj @0)))
3423 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
3424 (convert @0)))
3425
3426/* conj({x,y}) -> {x,-y} */
3427(simplify
3428 (conj (convert?:s (complex:s @0 @1)))
3429 (with { tree itype = TREE_TYPE (type); }
3430 (complex (convert:itype @0) (negate (convert:itype @1)))))
3431
3432/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
3433(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
3434 (simplify
3435 (bswap (bswap @0))
3436 @0)
3437 (simplify
3438 (bswap (bit_not (bswap @0)))
3439 (bit_not @0))
3440 (for bitop (bit_xor bit_ior bit_and)
3441 (simplify
3442 (bswap (bitop:c (bswap @0) @1))
3443 (bitop @0 (bswap @1)))))
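/* E.g. __builtin_bswap32 (__builtin_bswap32 (x) & y) becomes
   x & __builtin_bswap32 (y), and bswap of bswap cancels entirely.  */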
3444
3445
3446/* Combine COND_EXPRs and VEC_COND_EXPRs. */
3447
3448/* Simplify constant conditions.
3449 Only optimize constant conditions when the selected branch
3450 has the same type as the COND_EXPR. This avoids optimizing
3451 away "c ? x : throw", where the throw has a void type.
3452 Note that we cannot throw away the fold-const.c variant nor
3453 this one as we depend on doing this transform before possibly
3454 A ? B : B -> B triggers and the fold-const.c one can optimize
3455 0 ? A : B to B even if A has side-effects. Something
3456 genmatch cannot handle. */
3457(simplify
3458 (cond INTEGER_CST@0 @1 @2)
3459 (if (integer_zerop (@0))
3460 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
3461 @2)
3462 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
3463 @1)))
3464(simplify
3465 (vec_cond VECTOR_CST@0 @1 @2)
3466 (if (integer_all_onesp (@0))
3467 @1
3468 (if (integer_zerop (@0))
3469 @2)))
96994de0 3470
3471#if GIMPLE
3472/* Sink unary operations into the branches, but only if both arms can be folded.  */
3473(for op (negate bit_not abs absu)
3474 (simplify
3475 (op (vec_cond:s @0 @1 @2))
3476 (vec_cond @0 (op! @1) (op! @2))))
3477
3478/* Sink a binary operation into the branches, but only if it can be folded.  */
3479(for op (tcc_comparison plus minus mult bit_and bit_ior bit_xor
3480 rdiv trunc_div ceil_div floor_div round_div
3481 trunc_mod ceil_mod floor_mod round_mod min max)
3482/* (c ? a : b) op (c ? d : e) --> c ? (a op d) : (b op e) */
3483 (simplify
3484 (op (vec_cond:s @0 @1 @2) (vec_cond:s @0 @3 @4))
3485 (vec_cond @0 (op! @1 @3) (op! @2 @4)))
3486
3487/* (c ? a : b) op d --> c ? (a op d) : (b op d) */
3488 (simplify
3489 (op (vec_cond:s @0 @1 @2) @3)
3490 (vec_cond @0 (op! @1 @3) (op! @2 @3)))
3491 (simplify
3492 (op @3 (vec_cond:s @0 @1 @2))
3493 (vec_cond @0 (op! @3 @1) (op! @3 @2))))
3494#endif
3495
3496/* (v ? w : 0) ? a : b is just (v & w) ? a : b
3497 Currently disabled after pass lvec because ARM understands
3498 VEC_COND_EXPR<v==w,-1,0> but not a plain v==w fed to BIT_IOR_EXPR. */
3499(simplify
3500 (vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
a1ee6d50 3501 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
3502 (vec_cond (bit_and @0 @3) @1 @2)))
3503(simplify
3504 (vec_cond (vec_cond:s @0 integer_all_onesp @3) @1 @2)
a1ee6d50 3505 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
3506 (vec_cond (bit_ior @0 @3) @1 @2)))
3507(simplify
3508 (vec_cond (vec_cond:s @0 integer_zerop @3) @1 @2)
a1ee6d50 3509 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
3510 (vec_cond (bit_ior @0 (bit_not @3)) @2 @1)))
3511(simplify
3512 (vec_cond (vec_cond:s @0 @3 integer_all_onesp) @1 @2)
a1ee6d50 3513 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
3514 (vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
3515
3516/* c1 ? c2 ? a : b : b --> (c1 & c2) ? a : b */
3517(simplify
3518 (vec_cond @0 (vec_cond:s @1 @2 @3) @3)
a1ee6d50 3519 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
3520 (vec_cond (bit_and @0 @1) @2 @3)))
3521(simplify
3522 (vec_cond @0 @2 (vec_cond:s @1 @2 @3))
a1ee6d50 3523 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
3524 (vec_cond (bit_ior @0 @1) @2 @3)))
3525(simplify
3526 (vec_cond @0 (vec_cond:s @1 @2 @3) @2)
a1ee6d50 3527 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
3528 (vec_cond (bit_ior (bit_not @0) @1) @2 @3)))
3529(simplify
3530 (vec_cond @0 @3 (vec_cond:s @1 @2 @3))
a1ee6d50 3531 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
229752af 3532 (vec_cond (bit_and (bit_not @0) @1) @2 @3)))
34a13a52 3533
3534/* Canonicalize mask ? { 0, ... } : { -1, ...} to ~mask if the mask
3535 types are compatible. */
3536(simplify
3537 (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2)
3538 (if (VECTOR_BOOLEAN_TYPE_P (type)
3539 && types_match (type, TREE_TYPE (@0)))
3540 (if (integer_zerop (@1) && integer_all_onesp (@2))
3541 (bit_not @0)
3542 (if (integer_all_onesp (@1) && integer_zerop (@2))
3543 @0))))
3544
3545/* Simplification moved from fold_cond_expr_with_comparison. It may also
3546 be extended. */
3547/* This pattern implements two kinds of simplification:
3548
3549 Case 1)
3550 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
3551 1) Conversions are type widening from a smaller type.
3552 2) Const c1 equals c2 after canonicalizing the comparison.
3553 3) The comparison has tree code LT, LE, GT or GE.
3554 This specific pattern is needed when (cmp (convert x) c) may not
3555 be simplified by comparison patterns because of multiple uses of
3556 x.  It also makes sense here because simplifying across multiple
3557 referenced variables is always beneficial for complicated cases.
3558
3559 Case 2)
3560 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
3561(for cmp (lt le gt ge eq)
b5481987 3562 (simplify
ae22bc5d 3563 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
3564 (with
3565 {
3566 tree from_type = TREE_TYPE (@1);
3567 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
ae22bc5d 3568 enum tree_code code = ERROR_MARK;
b5481987 3569
3570 if (INTEGRAL_TYPE_P (from_type)
3571 && int_fits_type_p (@2, from_type)
3572 && (types_match (c1_type, from_type)
3573 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
3574 && (TYPE_UNSIGNED (from_type)
3575 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
3576 && (types_match (c2_type, from_type)
3577 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
3578 && (TYPE_UNSIGNED (from_type)
3579 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
3580 {
ae22bc5d 3581 if (cmp != EQ_EXPR)
b5481987 3582 {
3583 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
3584 {
3585 /* X <= Y - 1 is the same as X < Y.  */
ae22bc5d 3586 if (cmp == LE_EXPR)
3587 code = LT_EXPR;
3588 /* X > Y - 1 is the same as X >= Y.  */
ae22bc5d 3589 if (cmp == GT_EXPR)
3590 code = GE_EXPR;
3591 }
3592 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
3593 {
3594 /* X < Y + 1 is the same as X <= Y.  */
ae22bc5d 3595 if (cmp == LT_EXPR)
3596 code = LE_EXPR;
3597 /* X >= Y + 1 is the same as X > Y.  */
ae22bc5d 3598 if (cmp == GE_EXPR)
3599 code = GT_EXPR;
3600 }
3601 if (code != ERROR_MARK
3602 || wi::to_widest (@2) == wi::to_widest (@3))
e2535011 3603 {
ae22bc5d 3604 if (cmp == LT_EXPR || cmp == LE_EXPR)
e2535011 3605 code = MIN_EXPR;
ae22bc5d 3606 if (cmp == GT_EXPR || cmp == GE_EXPR)
3607 code = MAX_EXPR;
3608 }
b5481987 3609 }
e2535011 3610 /* Can we do A == C1 ? A : C2 -> A == C1 ? C1 : C2?  */
3611 else if (int_fits_type_p (@3, from_type))
3612 code = EQ_EXPR;
3613 }
3614 }
3615 (if (code == MAX_EXPR)
21aaaf1e 3616 (convert (max @1 (convert @2)))
b5481987 3617 (if (code == MIN_EXPR)
21aaaf1e 3618 (convert (min @1 (convert @2)))
e2535011 3619 (if (code == EQ_EXPR)
ae22bc5d 3620 (convert (cond (eq @1 (convert @3))
21aaaf1e 3621 (convert:from_type @3) (convert:from_type @2)))))))))
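/* E.g. x <= 4 ? x : 5 becomes MIN (x, 5) after the LE/LT boundary
   adjustment above.  */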
b5481987 3622
3623/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
3624
3625 1) OP is PLUS or MINUS.
3626 2) CMP is LT, LE, GT or GE.
3627 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
3628
3629 This pattern also handles special cases like:
3630
3631 A) Operand x is an unsigned to signed type conversion and c1 is
3632 integer zero. In this case,
3633 (signed type)x < 0 <=> x > MAX_VAL(signed type)
3634 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
3635 B) Const c1 may not equal (C3 op' C2).  In this case we also
3636 check equality for (c1+1) and (c1-1) by adjusting comparison
3637 code.
3638
3639 TODO: Though signed type is handled by this pattern, it cannot be
3640 simplified at the moment because the C standard requires additional
3641 type promotion. In order to match&simplify it here, the IR needs
3642 to be cleaned up by other optimizers, i.e, VRP. */
3643(for op (plus minus)
3644 (for cmp (lt le gt ge)
3645 (simplify
3646 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
3647 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
3648 (if (types_match (from_type, to_type)
3649 /* Check if it is special case A). */
3650 || (TYPE_UNSIGNED (from_type)
3651 && !TYPE_UNSIGNED (to_type)
3652 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
3653 && integer_zerop (@1)
3654 && (cmp == LT_EXPR || cmp == GE_EXPR)))
3655 (with
3656 {
4a669ac3 3657 wi::overflow_type overflow = wi::OVF_NONE;
714445ae 3658 enum tree_code code, cmp_code = cmp;
3659 wide_int real_c1;
3660 wide_int c1 = wi::to_wide (@1);
3661 wide_int c2 = wi::to_wide (@2);
3662 wide_int c3 = wi::to_wide (@3);
3663 signop sgn = TYPE_SIGN (from_type);
3664
3665 /* Handle special case A), given x of unsigned type:
3666 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
3667 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
3668 if (!types_match (from_type, to_type))
3669 {
3670 if (cmp_code == LT_EXPR)
3671 cmp_code = GT_EXPR;
3672 if (cmp_code == GE_EXPR)
3673 cmp_code = LE_EXPR;
3674 c1 = wi::max_value (to_type);
3675 }
3676 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
3677 compute (c3 op' c2) and check if it equals to c1 with op' being
3678 the inverted operator of op. Make sure overflow doesn't happen
3679 if it is undefined. */
3680 if (op == PLUS_EXPR)
3681 real_c1 = wi::sub (c3, c2, sgn, &overflow);
3682 else
3683 real_c1 = wi::add (c3, c2, sgn, &overflow);
3684
3685 code = cmp_code;
3686 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3687 {
3688 /* Check if c1 equals to real_c1. Boundary condition is handled
3689 by adjusting comparison operation if necessary. */
3690 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3691 && !overflow)
3692 {
3693 /* X <= Y - 1 equals to X < Y. */
3694 if (cmp_code == LE_EXPR)
3695 code = LT_EXPR;
3696 /* X > Y - 1 equals to X >= Y. */
3697 if (cmp_code == GT_EXPR)
3698 code = GE_EXPR;
3699 }
3700 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3701 && !overflow)
3702 {
3703 /* X < Y + 1 equals to X <= Y. */
3704 if (cmp_code == LT_EXPR)
3705 code = LE_EXPR;
3706 /* X >= Y + 1 equals to X > Y. */
3707 if (cmp_code == GE_EXPR)
3708 code = GT_EXPR;
3709 }
3710 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3711 {
3712 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3713 code = MIN_EXPR;
3714 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3715 code = MAX_EXPR;
3716 }
3717 }
3718 }
3719 (if (code == MAX_EXPR)
3720 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3721 { wide_int_to_tree (from_type, c2); })
3722 (if (code == MIN_EXPR)
3723 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3724 { wide_int_to_tree (from_type, c2); })))))))))
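/* E.g. x > 4 ? x + 1 : 5 becomes MAX (x, 4) + 1 here: real_c1 is
   c3 - c2 = 4, which matches c1, so the comparison collapses to a max.  */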
3725
3726(for cnd (cond vec_cond)
3727 /* A ? B : (A ? X : C) -> A ? B : C. */
3728 (simplify
3729 (cnd @0 (cnd @0 @1 @2) @3)
3730 (cnd @0 @1 @3))
3731 (simplify
3732 (cnd @0 @1 (cnd @0 @2 @3))
3733 (cnd @0 @1 @3))
3734 /* A ? B : (!A ? C : X) -> A ? B : C. */
3735 /* ??? This matches embedded conditions open-coded because genmatch
3736 would generate matching code for conditions in separate stmts only.
3737 The following is still important to merge then and else arm cases
3738 from if-conversion. */
3739 (simplify
3740 (cnd @0 @1 (cnd @2 @3 @4))
2c58d42c 3741 (if (inverse_conditions_p (@0, @2))
3742 (cnd @0 @1 @3)))
3743 (simplify
3744 (cnd @0 (cnd @1 @2 @3) @4)
2c58d42c 3745 (if (inverse_conditions_p (@0, @1))
24a179f8 3746 (cnd @0 @3 @4)))
3747
3748 /* A ? B : B -> B. */
3749 (simplify
3750 (cnd @0 @1 @1)
09240451 3751 @1)
96994de0 3752
3753 /* !A ? B : C -> A ? C : B. */
3754 (simplify
3755 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3756 (cnd @0 @2 @1)))
f84e7fd6 3757
3758/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3759 return all -1 or all 0 results. */
3760/* ??? We could instead convert all instances of the vec_cond to negate,
3761 but that isn't necessarily a win on its own. */
3762(simplify
a3ca1bc5 3763 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 3764 (if (VECTOR_TYPE_P (type)
3765 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3766 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
f43d102e 3767 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 3768 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 3769 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f43d102e 3770
a3ca1bc5 3771/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
f43d102e 3772(simplify
a3ca1bc5 3773 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 3774 (if (VECTOR_TYPE_P (type)
3775 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3776 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
f43d102e 3777 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 3778 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 3779 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f84e7fd6 3780
2ee05f1e 3781
3782/* Simplifications of comparisons. */
3783
3784/* See if we can reduce the magnitude of a constant involved in a
3785 comparison by changing the comparison code. This is a canonicalization
3786 formerly done by maybe_canonicalize_comparison_1. */
3787(for cmp (le gt)
3788 acmp (lt ge)
3789 (simplify
3790 (cmp @0 uniform_integer_cst_p@1)
3791 (with { tree cst = uniform_integer_cst_p (@1); }
3792 (if (tree_int_cst_sgn (cst) == -1)
3793 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3794 wide_int_to_tree (TREE_TYPE (cst),
3795 wi::to_wide (cst)
3796 + 1)); })))))
3797(for cmp (ge lt)
3798 acmp (gt le)
3799 (simplify
3800 (cmp @0 uniform_integer_cst_p@1)
3801 (with { tree cst = uniform_integer_cst_p (@1); }
3802 (if (tree_int_cst_sgn (cst) == 1)
3803 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3804 wide_int_to_tree (TREE_TYPE (cst),
3805 wi::to_wide (cst) - 1)); })))))
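/* E.g. x <= -2 becomes x < -1 and x >= 3 becomes x > 2, nudging the
   constant toward zero.  */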
24f1db9c 3806
3807/* We can simplify a logical negation of a comparison to the
3808 inverted comparison. As we cannot compute an expression
3809 operator using invert_tree_comparison we have to simulate
3810 that with expression code iteration. */
3811(for cmp (tcc_comparison)
3812 icmp (inverted_tcc_comparison)
3813 ncmp (inverted_tcc_comparison_with_nans)
3814 /* Ideally we'd like to combine the following two patterns
3815 and handle some more cases by using
3816 (logical_inverted_value (cmp @0 @1))
3817 here but for that genmatch would need to "inline" that.
3818 For now implement what forward_propagate_comparison did. */
3819 (simplify
3820 (bit_not (cmp @0 @1))
3821 (if (VECTOR_TYPE_P (type)
3822 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3823 /* Comparison inversion may be impossible for trapping math,
3824 invert_tree_comparison will tell us. But we can't use
3825 a computed operator in the replacement tree thus we have
3826 to play the trick below. */
3827 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 3828 (cmp, HONOR_NANS (@0)); }
f84e7fd6 3829 (if (ic == icmp)
3830 (icmp @0 @1)
3831 (if (ic == ncmp)
3832 (ncmp @0 @1))))))
f84e7fd6 3833 (simplify
3834 (bit_xor (cmp @0 @1) integer_truep)
3835 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 3836 (cmp, HONOR_NANS (@0)); }
09240451 3837 (if (ic == icmp)
3838 (icmp @0 @1)
3839 (if (ic == ncmp)
3840 (ncmp @0 @1))))))
e18c1d66 3841
3842/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3843 ??? The transformation is valid for the other operators if overflow
3844 is undefined for the type, but performing it here badly interacts
3845 with the transformation in fold_cond_expr_with_comparison which
3846 attempts to synthesize ABS_EXPR.  */
3847(for cmp (eq ne)
3848 (for sub (minus pointer_diff)
3849 (simplify
3850 (cmp (sub@2 @0 @1) integer_zerop)
3851 (if (single_use (@2))
3852 (cmp @0 @1)))))
3853
3854/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3855 signed arithmetic case. That form is created by the compiler
3856 often enough for folding it to be of value. One example is in
3857 computing loop trip counts after Operator Strength Reduction. */
3858(for cmp (simple_comparison)
3859 scmp (swapped_simple_comparison)
2ee05f1e 3860 (simplify
bc6e9db4 3861 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
3862 /* Handle unfolded multiplication by zero. */
3863 (if (integer_zerop (@1))
3864 (cmp @1 @2)
3865 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3866 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3867 && single_use (@3))
3868 /* If @1 is negative we swap the sense of the comparison. */
3869 (if (tree_int_cst_sgn (@1) < 0)
3870 (scmp @0 @2)
3871 (cmp @0 @2))))))
03cc70b5 3872
ca2b8c08 3873/* For integral types with undefined overflow fold
3874 x * C1 == C2 into x == C2 / C1 or false.
3875 If overflow wraps and C1 is odd, simplify to x == C2 / C1 in the ring
3876 Z / 2^n Z. */
3877(for cmp (eq ne)
3878 (simplify
3879 (cmp (mult @0 INTEGER_CST@1) INTEGER_CST@2)
3880 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3881 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3882 && wi::to_wide (@1) != 0)
3883 (with { widest_int quot; }
3884 (if (wi::multiple_of_p (wi::to_widest (@2), wi::to_widest (@1),
3885 TYPE_SIGN (TREE_TYPE (@0)), &quot))
3886 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), quot); })
3887 { constant_boolean_node (cmp == NE_EXPR, type); }))
3888 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3889 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
3890 && (wi::bit_and (wi::to_wide (@1), 1) == 1))
3891 (cmp @0
3892 {
3893 tree itype = TREE_TYPE (@0);
3894 int p = TYPE_PRECISION (itype);
3895 wide_int m = wi::one (p + 1) << p;
3896 wide_int a = wide_int::from (wi::to_wide (@1), p + 1, UNSIGNED);
3897 wide_int i = wide_int::from (wi::mod_inv (a, m),
3898 p, TYPE_SIGN (itype));
3899 wide_int_to_tree (itype, wi::mul (i, wi::to_wide (@2)));
3900 })))))
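/* E.g. in a 32-bit unsigned (wrapping) type, x * 3 == 6 becomes
   x == 6 * 0xaaaaaaab, i.e. x == 2, using the modular inverse of 3.  */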
ca2b8c08 3901
3902/* Simplify comparison of something with itself. For IEEE
3903 floating-point, we can only do some of these simplifications. */
287f8f17 3904(for cmp (eq ge le)
3905 (simplify
3906 (cmp @0 @0)
287f8f17 3907 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 3908 || ! HONOR_NANS (@0))
3909 { constant_boolean_node (true, type); }
3910 (if (cmp != EQ_EXPR)
3911 (eq @0 @0)))))
3912(for cmp (ne gt lt)
3913 (simplify
3914 (cmp @0 @0)
3915 (if (cmp != NE_EXPR
3916 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 3917 || ! HONOR_NANS (@0))
2ee05f1e 3918 { constant_boolean_node (false, type); })))
3919(for cmp (unle unge uneq)
3920 (simplify
3921 (cmp @0 @0)
3922 { constant_boolean_node (true, type); }))
3923(for cmp (unlt ungt)
3924 (simplify
3925 (cmp @0 @0)
3926 (unordered @0 @0)))
3927(simplify
3928 (ltgt @0 @0)
3929 (if (!flag_trapping_math)
3930 { constant_boolean_node (false, type); }))
3931
3932/* Fold ~X op ~Y as Y op X. */
07cdc2b8 3933(for cmp (simple_comparison)
2ee05f1e 3934 (simplify
3935 (cmp (bit_not@2 @0) (bit_not@3 @1))
3936 (if (single_use (@2) && single_use (@3))
3937 (cmp @1 @0))))
3938
3939/* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
3940(for cmp (simple_comparison)
3941 scmp (swapped_simple_comparison)
2ee05f1e 3942 (simplify
3943 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3944 (if (single_use (@2)
3945 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
3946 (scmp @0 (bit_not @1)))))
3947
3948(for cmp (simple_comparison)
3949 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3950 (simplify
3951 (cmp (convert@2 @0) (convert? @1))
3952 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3953 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3954 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3955 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3956 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3957 (with
3958 {
3959 tree type1 = TREE_TYPE (@1);
3960 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3961 {
3962 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3963 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3964 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3965 type1 = float_type_node;
3966 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3967 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3968 type1 = double_type_node;
3969 }
3970 tree newtype
3971 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
03cc70b5 3972 ? TREE_TYPE (@0) : type1);
3973 }
3974 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3975 (cmp (convert:newtype @0) (convert:newtype @1))))))
03cc70b5 3976
3977 (simplify
3978 (cmp @0 REAL_CST@1)
3979 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
3980 (switch
3981 /* a CMP (-0) -> a CMP 0 */
3982 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3983 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3984 /* x != NaN is always true, other ops are always false. */
3985 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3986 && ! HONOR_SNANS (@1))
3987 { constant_boolean_node (cmp == NE_EXPR, type); })
3988 /* Fold comparisons against infinity. */
3989 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3990 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3991 (with
3992 {
3993 REAL_VALUE_TYPE max;
3994 enum tree_code code = cmp;
3995 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3996 if (neg)
3997 code = swap_tree_comparison (code);
3998 }
3999 (switch
e96a5786 4000 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
64d3a1f0 4001 (if (code == GT_EXPR
e96a5786 4002 && !(HONOR_NANS (@0) && flag_trapping_math))
4003 { constant_boolean_node (false, type); })
4004 (if (code == LE_EXPR)
e96a5786 4005 /* x <= +Inf is always true, if we don't care about NaNs. */
4006 (if (! HONOR_NANS (@0))
4007 { constant_boolean_node (true, type); }
4008 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
4009 an "invalid" exception. */
4010 (if (!flag_trapping_math)
4011 (eq @0 @0))))
4012 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
4013 for == this introduces an exception for x a NaN. */
4014 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
4015 || code == GE_EXPR)
4016 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
4017 (if (neg)
4018 (lt @0 { build_real (TREE_TYPE (@0), max); })
4019 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
4020 /* x < +Inf is always equal to x <= DBL_MAX. */
4021 (if (code == LT_EXPR)
4022 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
4023 (if (neg)
4024 (ge @0 { build_real (TREE_TYPE (@0), max); })
4025 (le @0 { build_real (TREE_TYPE (@0), max); }))))
4026 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
4027 an exception for x a NaN so use an unordered comparison. */
4028 (if (code == NE_EXPR)
4029 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
4030 (if (! HONOR_NANS (@0))
4031 (if (neg)
4032 (ge @0 { build_real (TREE_TYPE (@0), max); })
4033 (le @0 { build_real (TREE_TYPE (@0), max); }))
4034 (if (neg)
4035 (unge @0 { build_real (TREE_TYPE (@0), max); })
4036 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
4037
4038 /* If this is a comparison of a real constant with a PLUS_EXPR
4039 or a MINUS_EXPR of a real constant, we can convert it into a
4040 comparison with a revised real constant as long as no overflow
4041 occurs when unsafe_math_optimizations are enabled. */
4042 (if (flag_unsafe_math_optimizations)
4043 (for op (plus minus)
4044 (simplify
4045 (cmp (op @0 REAL_CST@1) REAL_CST@2)
4046 (with
4047 {
4048 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
4049 TREE_TYPE (@1), @2, @1);
4050 }
f980c9a2 4051 (if (tem && !TREE_OVERFLOW (tem))
4052 (cmp @0 { tem; }))))))
4053
4054 /* Likewise, we can simplify a comparison of a real constant with
4055 a MINUS_EXPR whose first operand is also a real constant, i.e.
4056 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
4057 floating-point types only if -fassociative-math is set. */
4058 (if (flag_associative_math)
4059 (simplify
0409237b 4060 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
07cdc2b8 4061 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
f980c9a2 4062 (if (tem && !TREE_OVERFLOW (tem))
4063 (cmp { tem; } @1)))))
4064
4065 /* Fold comparisons against built-in math functions. */
0043b528 4066 (if (flag_unsafe_math_optimizations && ! flag_errno_math)
4067 (for sq (SQRT)
4068 (simplify
4069 (cmp (sq @0) REAL_CST@1)
4070 (switch
4071 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
4072 (switch
4073 /* sqrt(x) < y is always false, if y is negative. */
4074 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
8fdc6c67 4075 { constant_boolean_node (false, type); })
64d3a1f0
RB
4076 /* sqrt(x) > y is always true, if y is negative and we
4077 don't care about NaNs, i.e. negative values of x. */
4078 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
4079 { constant_boolean_node (true, type); })
4080 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
4081 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
c53233c6
RS
4082 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
4083 (switch
4084 /* sqrt(x) < 0 is always false. */
4085 (if (cmp == LT_EXPR)
4086 { constant_boolean_node (false, type); })
4087 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
4088 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
4089 { constant_boolean_node (true, type); })
4090 /* sqrt(x) <= 0 -> x == 0. */
4091 (if (cmp == LE_EXPR)
4092 (eq @0 @1))
4093 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
4094 == or !=. In the last case:
4095
4096 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
4097
4098 if x is negative or NaN. Due to -funsafe-math-optimizations,
4099 the results for other x follow from natural arithmetic. */
4100 (cmp @0 @1)))
0043b528
JJ
4101 (if ((cmp == LT_EXPR
4102 || cmp == LE_EXPR
4103 || cmp == GT_EXPR
4104 || cmp == GE_EXPR)
4105 && !REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4106 /* Give up for -frounding-math. */
4107 && !HONOR_SIGN_DEPENDENT_ROUNDING (TREE_TYPE (@0)))
64d3a1f0
RB
4108 (with
4109 {
0043b528
JJ
4110 REAL_VALUE_TYPE c2;
4111 enum tree_code ncmp = cmp;
4112 const real_format *fmt
4113 = REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)));
5c88ea94
RS
4114 real_arithmetic (&c2, MULT_EXPR,
4115 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
0043b528
JJ
4116 real_convert (&c2, fmt, &c2);
4117 /* See PR91734: if c2 is inexact and sqrt(c2) < c (or sqrt(c2) >= c),
4118 then change LT_EXPR into LE_EXPR or GE_EXPR into GT_EXPR. */
4119 if (!REAL_VALUE_ISINF (c2))
4120 {
4121 tree c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
4122 build_real (TREE_TYPE (@0), c2));
4123 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
4124 ncmp = ERROR_MARK;
4125 else if ((cmp == LT_EXPR || cmp == GE_EXPR)
4126 && real_less (&TREE_REAL_CST (c3), &TREE_REAL_CST (@1)))
4127 ncmp = cmp == LT_EXPR ? LE_EXPR : GT_EXPR;
4128 else if ((cmp == LE_EXPR || cmp == GT_EXPR)
4129 && real_less (&TREE_REAL_CST (@1), &TREE_REAL_CST (c3)))
4130 ncmp = cmp == LE_EXPR ? LT_EXPR : GE_EXPR;
4131 else
4132 {
4133 /* With rounding to even, sqrt of up to 3 different values
4134 gives the same normal result, so in some cases c2 needs
4135 to be adjusted. */
4136 REAL_VALUE_TYPE c2alt, tow;
4137 if (cmp == LT_EXPR || cmp == GE_EXPR)
4138 tow = dconst0;
4139 else
4140 real_inf (&tow);
4141 real_nextafter (&c2alt, fmt, &c2, &tow);
4142 real_convert (&c2alt, fmt, &c2alt);
4143 if (REAL_VALUE_ISINF (c2alt))
4144 ncmp = ERROR_MARK;
4145 else
4146 {
4147 c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
4148 build_real (TREE_TYPE (@0), c2alt));
4149 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
4150 ncmp = ERROR_MARK;
4151 else if (real_equal (&TREE_REAL_CST (c3),
4152 &TREE_REAL_CST (@1)))
4153 c2 = c2alt;
4154 }
4155 }
4156 }
64d3a1f0 4157 }
4158 (if (cmp == GT_EXPR || cmp == GE_EXPR)
4159 (if (REAL_VALUE_ISINF (c2))
4160 /* sqrt(x) > y is x == +Inf, when y is very large. */
4161 (if (HONOR_INFINITIES (@0))
4162 (eq @0 { build_real (TREE_TYPE (@0), c2); })
4163 { constant_boolean_node (false, type); })
4164 /* sqrt(x) > c is the same as x > c*c. */
4165 (if (ncmp != ERROR_MARK)
4166 (if (ncmp == GE_EXPR)
4167 (ge @0 { build_real (TREE_TYPE (@0), c2); })
4168 (gt @0 { build_real (TREE_TYPE (@0), c2); }))))
4169 /* else if (cmp == LT_EXPR || cmp == LE_EXPR) */
4170 (if (REAL_VALUE_ISINF (c2))
4171 (switch
4172 /* sqrt(x) < y is always true, when y is a very large
4173 value and we don't care about NaNs or Infinities. */
4174 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
4175 { constant_boolean_node (true, type); })
4176 /* sqrt(x) < y is x != +Inf when y is very large and we
4177 don't care about NaNs. */
4178 (if (! HONOR_NANS (@0))
4179 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
4180 /* sqrt(x) < y is x >= 0 when y is very large and we
4181 don't care about Infinities. */
4182 (if (! HONOR_INFINITIES (@0))
4183 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
4184 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
4185 (if (GENERIC)
4186 (truth_andif
4187 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
4188 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
4189 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
4190 (if (ncmp != ERROR_MARK && ! HONOR_NANS (@0))
4191 (if (ncmp == LT_EXPR)
4192 (lt @0 { build_real (TREE_TYPE (@0), c2); })
4193 (le @0 { build_real (TREE_TYPE (@0), c2); }))
4194 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
4195 (if (ncmp != ERROR_MARK && GENERIC)
4196 (if (ncmp == LT_EXPR)
4197 (truth_andif
4198 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
4199 (lt @0 { build_real (TREE_TYPE (@0), c2); }))
4200 (truth_andif
4201 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
4202 (le @0 { build_real (TREE_TYPE (@0), c2); })))))))))))
4203 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
4204 (simplify
4205 (cmp (sq @0) (sq @1))
4206 (if (! HONOR_NANS (@0))
4207 (cmp @0 @1))))))
2ee05f1e 4208
e41ec71b 4209/* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
4210(for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
4211 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
4212 (simplify
4213 (cmp (float@0 @1) (float @2))
4214 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
4215 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
4216 (with
4217 {
4218 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
4219 tree type1 = TREE_TYPE (@1);
4220 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
4221 tree type2 = TREE_TYPE (@2);
4222 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
4223 }
4224 (if (fmt.can_represent_integral_type_p (type1)
4225 && fmt.can_represent_integral_type_p (type2))
4226 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
4227 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
4228 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
4229 && type1_signed_p >= type2_signed_p)
4230 (icmp @1 (convert @2))
4231 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
4232 && type1_signed_p <= type2_signed_p)
4233 (icmp (convert:type2 @1) @2)
4234 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
4235 && type1_signed_p == type2_signed_p)
4236 (icmp @1 @2))))))))))
e41ec71b 4237
4238/* Optimize various special cases of (FTYPE) N CMP CST. */
4239(for cmp (lt le eq ne ge gt)
4240 icmp (le le eq ne ge ge)
4241 (simplify
4242 (cmp (float @0) REAL_CST@1)
4243 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
4244 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
4245 (with
4246 {
4247 tree itype = TREE_TYPE (@0);
4248 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
4249 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
4250 /* Be careful to preserve any potential exceptions due to
4251 NaNs. qNaNs are ok in == or != context.
4252 TODO: relax under -fno-trapping-math or
4253 -fno-signaling-nans. */
4254 bool exception_p
4255 = real_isnan (cst) && (cst->signalling
c651dca2 4256 || (cmp != EQ_EXPR && cmp != NE_EXPR));
4257 }
4258 /* TODO: allow non-fitting itype and SNaNs when
4259 -fno-trapping-math. */
e41ec71b 4260 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
4261 (with
4262 {
e41ec71b 4263 signop isign = TYPE_SIGN (itype);
4264 REAL_VALUE_TYPE imin, imax;
4265 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
4266 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
4267
4268 REAL_VALUE_TYPE icst;
4269 if (cmp == GT_EXPR || cmp == GE_EXPR)
4270 real_ceil (&icst, fmt, cst);
4271 else if (cmp == LT_EXPR || cmp == LE_EXPR)
4272 real_floor (&icst, fmt, cst);
4273 else
4274 real_trunc (&icst, fmt, cst);
4275
b09bf97b 4276 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
4277
4278 bool overflow_p = false;
4279 wide_int icst_val
4280 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
4281 }
4282 (switch
4283 /* Optimize cases when CST is outside of ITYPE's range. */
4284 (if (real_compare (LT_EXPR, cst, &imin))
4285 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
4286 type); })
4287 (if (real_compare (GT_EXPR, cst, &imax))
4288 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
4289 type); })
4290 /* Remove cast if CST is an integer representable by ITYPE. */
4291 (if (cst_int_p)
4292 (cmp @0 { gcc_assert (!overflow_p);
4293 wide_int_to_tree (itype, icst_val); })
4294 )
4295 /* When CST is fractional, optimize
4296 (FTYPE) N == CST -> 0
4297 (FTYPE) N != CST -> 1. */
4298 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
03cc70b5 4299 { constant_boolean_node (cmp == NE_EXPR, type); })
4300 /* Otherwise replace with sensible integer constant. */
4301 (with
4302 {
4303 gcc_checking_assert (!overflow_p);
4304 }
4305 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
4306
4307/* Fold A /[ex] B CMP C to A CMP B * C. */
4308(for cmp (eq ne)
4309 (simplify
4310 (cmp (exact_div @0 @1) INTEGER_CST@2)
4311 (if (!integer_zerop (@1))
8e6cdc90 4312 (if (wi::to_wide (@2) == 0)
40fd269a
MG
4313 (cmp @0 @2)
4314 (if (TREE_CODE (@1) == INTEGER_CST)
4315 (with
4316 {
4a669ac3 4317 wi::overflow_type ovf;
8e6cdc90
RS
4318 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
4319 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
40fd269a
MG
4320 }
4321 (if (ovf)
4322 { constant_boolean_node (cmp == NE_EXPR, type); }
4323 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
4324(for cmp (lt le gt ge)
4325 (simplify
4326 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
8e6cdc90 4327 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
40fd269a
MG
4328 (with
4329 {
4a669ac3 4330 wi::overflow_type ovf;
8e6cdc90
RS
4331 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
4332 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
40fd269a
MG
4333 }
4334 (if (ovf)
8e6cdc90
RS
4335 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
4336 TYPE_SIGN (TREE_TYPE (@2)))
40fd269a
MG
4337 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
4338 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
4339
9cf60d3b
MG
4340/* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.
4341
4342 For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
4343 For large C (more than min/B+2^size), this is also true, with the
4344 multiplication computed modulo 2^size.
4345 For intermediate C, this just tests the sign of A. */
4346(for cmp (lt le gt ge)
4347 cmp2 (ge ge lt lt)
4348 (simplify
4349 (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
4350 (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
4351 && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
4352 && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
4353 (with
4354 {
4355 tree utype = TREE_TYPE (@2);
4356 wide_int denom = wi::to_wide (@1);
4357 wide_int right = wi::to_wide (@2);
4358 wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
4359 wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
4360 bool small = wi::leu_p (right, smax);
4361 bool large = wi::geu_p (right, smin);
4362 }
4363 (if (small || large)
4364 (cmp (convert:utype @0) (mult @2 (convert @1)))
4365 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))
4366
4367/* Unordered tests if either argument is a NaN. */
4368(simplify
4369 (bit_ior (unordered @0 @0) (unordered @1 @1))
aea417d7 4370 (if (types_match (@0, @1))
cfdc4f33 4371 (unordered @0 @1)))
4372(simplify
4373 (bit_and (ordered @0 @0) (ordered @1 @1))
4374 (if (types_match (@0, @1))
4375 (ordered @0 @1)))
4376(simplify
4377 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
4378 @2)
4379(simplify
4380 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
4381 @2)
e18c1d66 4382
4383/* Simple range test simplifications. */
4384/* A < B || A >= B -> true. */
4385(for test1 (lt le le le ne ge)
4386 test2 (ge gt ge ne eq ne)
4387 (simplify
4388 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
4389 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4390 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
4391 { constant_boolean_node (true, type); })))
4392/* A < B && A >= B -> false. */
4393(for test1 (lt lt lt le ne eq)
4394 test2 (ge gt eq gt eq gt)
4395 (simplify
4396 (bit_and:c (test1 @0 @1) (test2 @0 @1))
4397 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4398 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
4399 { constant_boolean_node (false, type); })))
4400
4401/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
4402 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
4403
4404 Note that comparisons
4405 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
4406 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
4407 will be canonicalized to above so there's no need to
4408 consider them here.
4409 */
4410
4411(for cmp (le gt)
4412 eqcmp (eq ne)
4413 (simplify
4414 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
4415 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
4416 (with
4417 {
4418 tree ty = TREE_TYPE (@0);
4419 unsigned prec = TYPE_PRECISION (ty);
4420 wide_int mask = wi::to_wide (@2, prec);
4421 wide_int rhs = wi::to_wide (@3, prec);
4422 signop sgn = TYPE_SIGN (ty);
4423 }
4424 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
4425 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
4426 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
4427 { build_zero_cst (ty); }))))))
4428
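A self-checking C sketch of the rule with N = 4 and K = 2, so 2**N - 1 = 15, 2**K - 1 = 3 and the new mask is 2**N - 2**K = 12 (function names are illustrative):

#include <stdio.h>

static int naive (unsigned x)      { return (x & 15) <= 3; }
static int simplified (unsigned x) { return (x & 12) == 0; }

int main (void)
{
  for (unsigned x = 0; x < 256; x++)
    if (naive (x) != simplified (x))
      return 1;
  puts ("equivalent");
  return 0;
}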
534bd33b
MG
4429/* -A CMP -B -> B CMP A. */
4430(for cmp (tcc_comparison)
4431 scmp (swapped_tcc_comparison)
4432 (simplify
4433 (cmp (negate @0) (negate @1))
4434 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
4435 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4436 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
4437 (scmp @0 @1)))
4438 (simplify
4439 (cmp (negate @0) CONSTANT_CLASS_P@1)
4440 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
4441 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4442 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
23f27839 4443 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
534bd33b
MG
4444 (if (tem && !TREE_OVERFLOW (tem))
4445 (scmp @0 { tem; }))))))
4446
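Concretely (illustrative C): for floating point, negation is exact, so the operands are simply swapped; for signed integers the rule applies only when overflow is undefined, as the guard above requires.

static int naive (double a, double b)      { return -a < -b; }
static int simplified (double a, double b) { return b < a; }

int main (void) { return naive (1.0, 2.0) != simplified (1.0, 2.0); }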
b0eb889b
MG
4447/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
4448(for op (eq ne)
4449 (simplify
4450 (op (abs @0) zerop@1)
4451 (op @0 @1)))
4452
6358a676
MG
4453/* From fold_sign_changed_comparison and fold_widened_comparison.
4454 FIXME: the lack of symmetry is disturbing. */
79d4f7c6
RB
4455(for cmp (simple_comparison)
4456 (simplify
4457 (cmp (convert@0 @00) (convert?@1 @10))
452ec2a5 4458 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
79d4f7c6
RB
4459 /* Disable this optimization if we're casting a function pointer
4460 type on targets that require function pointer canonicalization. */
4461 && !(targetm.have_canonicalize_funcptr_for_compare ()
400bc526
JDA
4462 && ((POINTER_TYPE_P (TREE_TYPE (@00))
4463 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
4464 || (POINTER_TYPE_P (TREE_TYPE (@10))
4465 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
2fde61e3 4466 && single_use (@0))
79d4f7c6
RB
4467 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
4468 && (TREE_CODE (@10) == INTEGER_CST
6358a676 4469 || @1 != @10)
79d4f7c6
RB
4470 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
4471 || cmp == NE_EXPR
4472 || cmp == EQ_EXPR)
6358a676 4473 && !POINTER_TYPE_P (TREE_TYPE (@00)))
79d4f7c6
RB
4474 /* ??? The special-casing of INTEGER_CST conversion was in the original
4475 code and is kept here to avoid a spurious overflow flag on the resulting
4476 constant which fold_convert produces. */
4477 (if (TREE_CODE (@1) == INTEGER_CST)
4478 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
4479 TREE_OVERFLOW (@1)); })
4480 (cmp @00 (convert @1)))
4481
4482 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
4483 /* If possible, express the comparison in the shorter mode. */
4484 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
7fd82d52
PP
4485 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
4486 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
4487 && TYPE_UNSIGNED (TREE_TYPE (@00))))
79d4f7c6
RB
4488 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
4489 || ((TYPE_PRECISION (TREE_TYPE (@00))
4490 >= TYPE_PRECISION (TREE_TYPE (@10)))
4491 && (TYPE_UNSIGNED (TREE_TYPE (@00))
4492 == TYPE_UNSIGNED (TREE_TYPE (@10))))
4493 || (TREE_CODE (@10) == INTEGER_CST
f6c15759 4494 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
4495 && int_fits_type_p (@10, TREE_TYPE (@00)))))
4496 (cmp @00 (convert @10))
4497 (if (TREE_CODE (@10) == INTEGER_CST
f6c15759 4498 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
4499 && !int_fits_type_p (@10, TREE_TYPE (@00)))
4500 (with
4501 {
4502 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
4503 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
4504 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
4505 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
4506 }
4507 (if (above || below)
4508 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
4509 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
4510 (if (cmp == LT_EXPR || cmp == LE_EXPR)
4511 { constant_boolean_node (above ? true : false, type); }
4512 (if (cmp == GT_EXPR || cmp == GE_EXPR)
4513 { constant_boolean_node (above ? false : true, type); }))))))))))))
66e1cacf 4514
96a111a3 4515(for cmp (eq ne)
96a111a3 4516 (simplify
5124c34f 4517 /* SSA names are canonicalized to 2nd place. */
96a111a3 4518 (cmp addr@0 SSA_NAME@1)
5124c34f
RB
4519 (with
4520 { poly_int64 off; tree base; }
4521 /* A local variable can never be pointed to by
4522 the default SSA name of an incoming parameter. */
4523 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
4524 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL
4525 && (base = get_base_address (TREE_OPERAND (@0, 0)))
4526 && TREE_CODE (base) == VAR_DECL
4527 && auto_var_in_fn_p (base, current_function_decl))
4528 (if (cmp == NE_EXPR)
4529 { constant_boolean_node (true, type); }
4530 { constant_boolean_node (false, type); })
4531 /* If the address is based on @1, decide using the offset. */
4532 (if ((base = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off))
4533 && TREE_CODE (base) == MEM_REF
4534 && TREE_OPERAND (base, 0) == @1)
4535 (with { off += mem_ref_offset (base).force_shwi (); }
4536 (if (known_ne (off, 0))
4537 { constant_boolean_node (cmp == NE_EXPR, type); }
4538 (if (known_eq (off, 0))
4539 { constant_boolean_node (cmp == EQ_EXPR, type); }))))))))
96a111a3 4540
66e1cacf
RB
4541/* Equality compare simplifications from fold_binary */
4542(for cmp (eq ne)
4543
4544 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
4545 Similarly for NE_EXPR. */
4546 (simplify
4547 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
4548 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
8e6cdc90 4549 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
66e1cacf
RB
4550 { constant_boolean_node (cmp == NE_EXPR, type); }))
4551
4552 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
4553 (simplify
4554 (cmp (bit_xor @0 @1) integer_zerop)
4555 (cmp @0 @1))
4556
4557 /* (X ^ Y) == Y becomes X == 0.
4558 Likewise (X ^ Y) == X becomes Y == 0. */
4559 (simplify
99e943a2 4560 (cmp:c (bit_xor:c @0 @1) @0)
66e1cacf
RB
4561 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
4562
4563 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
4564 (simplify
4565 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
4566 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
d057c866 4567 (cmp @0 (bit_xor @1 (convert @2)))))
d057c866
RB
4568
4569 (simplify
4570 (cmp (convert? addr@0) integer_zerop)
4571 (if (tree_single_nonzero_warnv_p (@0, NULL))
73a80434
JJ
4572 { constant_boolean_node (cmp == NE_EXPR, type); }))
4573
4574 /* (X & C) op (Y & C) into (X ^ Y) & C op 0. */
4575 (simplify
4576 (cmp (bit_and:cs @0 @2) (bit_and:cs @1 @2))
4577 (cmp (bit_and (bit_xor @0 @1) @2) { build_zero_cst (TREE_TYPE (@2)); })))
d057c866 4578
6b5c7ee0
JJ
4579/* (X < 0) != (Y < 0) into (X ^ Y) < 0.
4580 (X >= 0) != (Y >= 0) into (X ^ Y) < 0.
4581 (X < 0) == (Y < 0) into (X ^ Y) >= 0.
4582 (X >= 0) == (Y >= 0) into (X ^ Y) >= 0. */
4583(for cmp (eq ne)
4584 ncmp (ge lt)
4585 (for sgncmp (ge lt)
4586 (simplify
4587 (cmp (sgncmp @0 integer_zerop@2) (sgncmp @1 integer_zerop))
4588 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4589 && !TYPE_UNSIGNED (TREE_TYPE (@0))
4590 && types_match (@0, @1))
4591 (ncmp (bit_xor @0 @1) @2)))))
4592/* (X < 0) == (Y >= 0) into (X ^ Y) < 0.
4593 (X < 0) != (Y >= 0) into (X ^ Y) >= 0. */
4594(for cmp (eq ne)
4595 ncmp (lt ge)
4596 (simplify
4597 (cmp:c (lt @0 integer_zerop@2) (ge @1 integer_zerop))
4598 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4599 && !TYPE_UNSIGNED (TREE_TYPE (@0))
4600 && types_match (@0, @1))
4601 (ncmp (bit_xor @0 @1) @2))))
4602
b0eb889b
MG
4603/* If we have (A & C) == C where C is a power of 2, convert this into
4604 (A & C) != 0. Similarly for NE_EXPR. */
4605(for cmp (eq ne)
4606 icmp (ne eq)
4607 (simplify
4608 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
4609 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
03cc70b5 4610
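E.g. (an ad hoc sketch with C = 8):

static int naive (unsigned a)      { return (a & 8) == 8; }
static int simplified (unsigned a) { return (a & 8) != 0; }

int main (void) { return naive (12) != simplified (12); }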
519e0faa
PB
4611/* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
4612 convert this into a shift followed by ANDing with D. */
4613(simplify
4614 (cond
4615 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
9e61e48e
JJ
4616 INTEGER_CST@2 integer_zerop)
4617 (if (integer_pow2p (@2))
4618 (with {
4619 int shift = (wi::exact_log2 (wi::to_wide (@2))
4620 - wi::exact_log2 (wi::to_wide (@1)));
4621 }
4622 (if (shift > 0)
4623 (bit_and
4624 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
4625 (bit_and
4626 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
4627 @2)))))
519e0faa 4628
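A self-checking sketch with C = 4 and D = 16: the shift count is log2 (16) - log2 (4) = 2, so the tested bit is moved up two places and re-masked (function names are ad hoc):

#include <stdio.h>

static int naive (unsigned a)      { return (a & 4) ? 16 : 0; }
static int simplified (unsigned a) { return (a << 2) & 16; }

int main (void)
{
  for (unsigned a = 0; a < 32; a++)
    if (naive (a) != simplified (a))
      return 1;
  puts ("equivalent");
  return 0;
}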
b0eb889b
MG
4629/* If we have (A & C) != 0 where C is the sign bit of A, convert
4630 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
4631(for cmp (eq ne)
4632 ncmp (ge lt)
4633 (simplify
4634 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
4635 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2be65d9e 4636 && type_has_mode_precision_p (TREE_TYPE (@0))
b0eb889b 4637 && element_precision (@2) >= element_precision (@0)
8e6cdc90 4638 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
b0eb889b
MG
4639 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
4640 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
4641
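Illustrative C, assuming 32-bit int so the mask is the sign bit:

static int naive (int x)      { return (x & 0x80000000u) != 0; }
static int simplified (int x) { return x < 0; }

int main (void) { return naive (-5) != simplified (-5); }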
519e0faa 4642/* If we have A < 0 ? C : 0 where C is a power of 2, convert
c0140e3c 4643 this into a right shift or sign extension followed by ANDing with C. */
519e0faa
PB
4644(simplify
4645 (cond
4646 (lt @0 integer_zerop)
9e61e48e
JJ
4647 INTEGER_CST@1 integer_zerop)
4648 (if (integer_pow2p (@1)
4649 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
c0140e3c 4650 (with {
8e6cdc90 4651 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
c0140e3c
JJ
4652 }
4653 (if (shift >= 0)
4654 (bit_and
4655 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
4656 @1)
4657 /* Otherwise ctype (the conversion type) must be wider than TREE_TYPE (@0),
4658 and pure sign extension followed by AND with C will achieve the effect. */
4659 (bit_and (convert @0) @1)))))
519e0faa 4660
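A sketch for 32-bit int with C = 16: the shift count is 32 - log2 (16) - 1 = 27. (C leaves >> on negative values implementation-defined; GCC defines it as an arithmetic shift, which is what the rule relies on.)

static int naive (int x)      { return x < 0 ? 16 : 0; }
static int simplified (int x) { return (x >> 27) & 16; }

int main (void) { return naive (-1) != simplified (-1); }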
68aba1f6
RB
4661/* When the addresses are not directly of decls, compare base and offset.
4662 This implements some remaining parts of fold_comparison's address
4663 comparisons, though not a complete version of it. Still, it is good
4664 enough to keep fold_stmt from regressing when not dispatching to fold_binary. */
4665(for cmp (simple_comparison)
4666 (simplify
f501d5cd 4667 (cmp (convert1?@2 addr@0) (convert2? addr@1))
68aba1f6
RB
4668 (with
4669 {
a90c8804 4670 poly_int64 off0, off1;
68aba1f6
RB
4671 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
4672 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
4673 if (base0 && TREE_CODE (base0) == MEM_REF)
4674 {
aca52e6f 4675 off0 += mem_ref_offset (base0).force_shwi ();
68aba1f6
RB
4676 base0 = TREE_OPERAND (base0, 0);
4677 }
4678 if (base1 && TREE_CODE (base1) == MEM_REF)
4679 {
aca52e6f 4680 off1 += mem_ref_offset (base1).force_shwi ();
68aba1f6
RB
4681 base1 = TREE_OPERAND (base1, 0);
4682 }
4683 }
da571fda
RB
4684 (if (base0 && base1)
4685 (with
4686 {
aad88aed 4687 int equal = 2;
70f40fea
JJ
4688 /* Punt in GENERIC on variables with value expressions;
4689 the value expressions might point to fields/elements
4690 of other vars etc. */
4691 if (GENERIC
4692 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
4693 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
4694 ;
4695 else if (decl_in_symtab_p (base0)
4696 && decl_in_symtab_p (base1))
da571fda
RB
4697 equal = symtab_node::get_create (base0)
4698 ->equal_address_to (symtab_node::get_create (base1));
c3bea076
RB
4699 else if ((DECL_P (base0)
4700 || TREE_CODE (base0) == SSA_NAME
4701 || TREE_CODE (base0) == STRING_CST)
4702 && (DECL_P (base1)
4703 || TREE_CODE (base1) == SSA_NAME
4704 || TREE_CODE (base1) == STRING_CST))
aad88aed 4705 equal = (base0 == base1);
93aa3c4a
JJ
4706 if (equal == 0)
4707 {
a4f9edf3
RB
4708 HOST_WIDE_INT ioff0 = -1, ioff1 = -1;
4709 off0.is_constant (&ioff0);
4710 off1.is_constant (&ioff1);
4711 if ((DECL_P (base0) && TREE_CODE (base1) == STRING_CST)
4712 || (TREE_CODE (base0) == STRING_CST && DECL_P (base1))
4713 || (TREE_CODE (base0) == STRING_CST
4714 && TREE_CODE (base1) == STRING_CST
4715 && ioff0 >= 0 && ioff1 >= 0
4716 && ioff0 < TREE_STRING_LENGTH (base0)
4717 && ioff1 < TREE_STRING_LENGTH (base1)
4718 /* This is an overly conservative test that the STRING_CSTs
4719 will not end up being string-merged. */
4720 && strncmp (TREE_STRING_POINTER (base0) + ioff0,
4721 TREE_STRING_POINTER (base1) + ioff1,
4722 MIN (TREE_STRING_LENGTH (base0) - ioff0,
4723 TREE_STRING_LENGTH (base1) - ioff1)) != 0))
4724 ;
4725 else if (!DECL_P (base0) || !DECL_P (base1))
93aa3c4a
JJ
4726 equal = 2;
4727 else if (cmp != EQ_EXPR && cmp != NE_EXPR)
4728 equal = 2;
4729 /* If this is a pointer comparison, ignore for now even
4730 valid equalities where one pointer is the offset zero
4731 of one object and the other to one past end of another one. */
4732 else if (!INTEGRAL_TYPE_P (TREE_TYPE (@2)))
4733 ;
4734 /* Assume that automatic variables can't be adjacent to global
4735 variables. */
4736 else if (is_global_var (base0) != is_global_var (base1))
4737 ;
4738 else
4739 {
4740 tree sz0 = DECL_SIZE_UNIT (base0);
4741 tree sz1 = DECL_SIZE_UNIT (base1);
4742 /* If sizes are unknown, e.g. VLA or not representable,
4743 punt. */
4744 if (!tree_fits_poly_int64_p (sz0)
4745 || !tree_fits_poly_int64_p (sz1))
4746 equal = 2;
4747 else
4748 {
4749 poly_int64 size0 = tree_to_poly_int64 (sz0);
4750 poly_int64 size1 = tree_to_poly_int64 (sz1);
4751 /* If one offset is pointing (or could be) to the beginning
4752 of one object and the other is pointing to one past the
4753 last byte of the other object, punt. */
4754 if (maybe_eq (off0, 0) && maybe_eq (off1, size1))
4755 equal = 2;
4756 else if (maybe_eq (off1, 0) && maybe_eq (off0, size0))
4757 equal = 2;
4758 /* If both offsets are the same, there are some cases
4759 we know are ok: either we know the offsets aren't
4760 zero, or we know both sizes are nonzero. */
4761 if (equal == 2
4762 && known_eq (off0, off1)
4763 && (known_ne (off0, 0)
4764 || (known_ne (size0, 0) && known_ne (size1, 0))))
4765 equal = 0;
4766 }
4767 }
4768 }
da571fda 4769 }
3fccbb9e
JJ
4770 (if (equal == 1
4771 && (cmp == EQ_EXPR || cmp == NE_EXPR
4772 /* If the offsets are equal we can ignore overflow. */
4773 || known_eq (off0, off1)
4774 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
4775 /* Or if we compare using pointers to decls or strings. */
4776 || (POINTER_TYPE_P (TREE_TYPE (@2))
4777 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
da571fda 4778 (switch
a90c8804
RS
4779 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4780 { constant_boolean_node (known_eq (off0, off1), type); })
4781 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4782 { constant_boolean_node (known_ne (off0, off1), type); })
4783 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
4784 { constant_boolean_node (known_lt (off0, off1), type); })
4785 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
4786 { constant_boolean_node (known_le (off0, off1), type); })
4787 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
4788 { constant_boolean_node (known_ge (off0, off1), type); })
4789 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
4790 { constant_boolean_node (known_gt (off0, off1), type); }))
93aa3c4a
JJ
4791 (if (equal == 0)
4792 (switch
4793 (if (cmp == EQ_EXPR)
4794 { constant_boolean_node (false, type); })
4795 (if (cmp == NE_EXPR)
4796 { constant_boolean_node (true, type); })))))))))
66e1cacf 4797
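Sketch of the easy outcomes of the base/offset logic above (ad hoc functions):

static int distinct_locals (void)
{
  int a, b;
  return &a == &b;         /* different decls: folds to false */
}

static int same_base (void)
{
  int a[2];
  return &a[1] == &a[0];   /* same base, offsets 4 and 0: false */
}

int main (void) { return distinct_locals () || same_base (); }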
98998245
RB
4798/* Simplify pointer equality compares using PTA. */
4799(for neeq (ne eq)
4800 (simplify
4801 (neeq @0 @1)
4802 (if (POINTER_TYPE_P (TREE_TYPE (@0))
4803 && ptrs_compare_unequal (@0, @1))
f913ff2a 4804 { constant_boolean_node (neeq != EQ_EXPR, type); })))
98998245 4805
8f63caf6 4806/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
467719fb
PK
4807 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
4808 Disable the transform if either operand is pointer to function.
4809 This broke pr22051-2.c for arm where function pointer
4810 canonicalization is not wanted. */
1c0a8806 4811
8f63caf6
RB
4812(for cmp (ne eq)
4813 (simplify
4814 (cmp (convert @0) INTEGER_CST@1)
f53e7e13
JJ
4815 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
4816 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
4817 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4818 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4819 && POINTER_TYPE_P (TREE_TYPE (@1))
4820 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
4821 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
8f63caf6
RB
4822 (cmp @0 (convert @1)))))
4823
21aacde4
RB
4824/* Non-equality compare simplifications from fold_binary */
4825(for cmp (lt gt le ge)
4826 /* Comparisons with the highest or lowest possible integer of
4827 the specified precision will have known values. */
4828 (simplify
f06e47d7
JJ
4829 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
4830 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
4831 || POINTER_TYPE_P (TREE_TYPE (@1))
4832 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
21aacde4
RB
4833 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
4834 (with
4835 {
f06e47d7
JJ
4836 tree cst = uniform_integer_cst_p (@1);
4837 tree arg1_type = TREE_TYPE (cst);
21aacde4
RB
4838 unsigned int prec = TYPE_PRECISION (arg1_type);
4839 wide_int max = wi::max_value (arg1_type);
4840 wide_int signed_max = wi::max_value (prec, SIGNED);
4841 wide_int min = wi::min_value (arg1_type);
4842 }
4843 (switch
f06e47d7 4844 (if (wi::to_wide (cst) == max)
21aacde4
RB
4845 (switch
4846 (if (cmp == GT_EXPR)
4847 { constant_boolean_node (false, type); })
4848 (if (cmp == GE_EXPR)
4849 (eq @2 @1))
4850 (if (cmp == LE_EXPR)
4851 { constant_boolean_node (true, type); })
4852 (if (cmp == LT_EXPR)
4853 (ne @2 @1))))
f06e47d7 4854 (if (wi::to_wide (cst) == min)
21aacde4
RB
4855 (switch
4856 (if (cmp == LT_EXPR)
4857 { constant_boolean_node (false, type); })
4858 (if (cmp == LE_EXPR)
4859 (eq @2 @1))
4860 (if (cmp == GE_EXPR)
4861 { constant_boolean_node (true, type); })
4862 (if (cmp == GT_EXPR)
4863 (ne @2 @1))))
f06e47d7 4864 (if (wi::to_wide (cst) == max - 1)
9bc22d19
RB
4865 (switch
4866 (if (cmp == GT_EXPR)
f06e47d7
JJ
4867 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4868 wide_int_to_tree (TREE_TYPE (cst),
4869 wi::to_wide (cst)
4870 + 1)); }))
9bc22d19 4871 (if (cmp == LE_EXPR)
f06e47d7
JJ
4872 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4873 wide_int_to_tree (TREE_TYPE (cst),
4874 wi::to_wide (cst)
4875 + 1)); }))))
4876 (if (wi::to_wide (cst) == min + 1)
21aacde4
RB
4877 (switch
4878 (if (cmp == GE_EXPR)
f06e47d7
JJ
4879 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4880 wide_int_to_tree (TREE_TYPE (cst),
4881 wi::to_wide (cst)
4882 - 1)); }))
21aacde4 4883 (if (cmp == LT_EXPR)
f06e47d7
JJ
4884 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4885 wide_int_to_tree (TREE_TYPE (cst),
4886 wi::to_wide (cst)
4887 - 1)); }))))
4888 (if (wi::to_wide (cst) == signed_max
21aacde4
RB
4889 && TYPE_UNSIGNED (arg1_type)
4890 /* We will flip the signedness of the comparison operator
4891 associated with the mode of @1, so the sign bit is
4892 specified by this mode. Check that @1 is the signed
4893 max associated with this sign bit. */
7a504f33 4894 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
21aacde4
RB
4895 /* signed_type does not work on pointer types. */
4896 && INTEGRAL_TYPE_P (arg1_type))
4897 /* The following case also applies to X < signed_max+1
4898 and X >= signed_max+1 because of previous transformations. */
4899 (if (cmp == LE_EXPR || cmp == GT_EXPR)
f06e47d7
JJ
4900 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
4901 (switch
4902 (if (cst == @1 && cmp == LE_EXPR)
4903 (ge (convert:st @0) { build_zero_cst (st); }))
4904 (if (cst == @1 && cmp == GT_EXPR)
4905 (lt (convert:st @0) { build_zero_cst (st); }))
4906 (if (cmp == LE_EXPR)
4907 (ge (view_convert:st @0) { build_zero_cst (st); }))
4908 (if (cmp == GT_EXPR)
4909 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
03cc70b5 4910
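Illustrative C for the max and signed_max cases (function names are ad hoc):

#include <limits.h>

static int never (unsigned x)  { return x > UINT_MAX; }   /* -> 0 */
static int ge_max (unsigned x) { return x >= UINT_MAX; }  /* -> x == UINT_MAX */
static int sign_bit (unsigned x)
{
  return x > INT_MAX;      /* signed_max case: -> (int) x < 0 */
}

int main (void) { return never (5) || !sign_bit ((unsigned) INT_MAX + 1); }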
b5d3d787
RB
4911(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
4912 /* If the second operand is NaN, the result is constant. */
4913 (simplify
4914 (cmp @0 REAL_CST@1)
4915 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4916 && (cmp != LTGT_EXPR || ! flag_trapping_math))
50301115 4917 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
b5d3d787 4918 ? false : true, type); })))
21aacde4 4919
55cf3946
RB
4920/* bool_var != 0 becomes bool_var. */
4921(simplify
b5d3d787 4922 (ne @0 integer_zerop)
55cf3946
RB
4923 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4924 && types_match (type, TREE_TYPE (@0)))
4925 (non_lvalue @0)))
4926/* bool_var == 1 becomes bool_var. */
4927(simplify
b5d3d787 4928 (eq @0 integer_onep)
55cf3946
RB
4929 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4930 && types_match (type, TREE_TYPE (@0)))
4931 (non_lvalue @0)))
b5d3d787
RB
4932/* Do not handle
4933 bool_var == 0 becomes !bool_var or
4934 bool_var != 1 becomes !bool_var
4935 here because that is only good in assignment context as long
4936 as we require a tcc_comparison in GIMPLE_CONDs where we'd
4937 replace if (x == 0) with tem = ~x; if (tem != 0) which is
4938 clearly less optimal and which we'll transform again in forwprop. */
55cf3946 4939
ca1206be
MG
4940/* When one argument is a constant, overflow detection can be simplified.
4941 Currently restricted to single use so as not to interfere too much with
4942 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4943 A + CST CMP A -> A CMP' CST' */
4944(for cmp (lt le ge gt)
4945 out (gt gt le le)
4946 (simplify
a8e9f9a3 4947 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
ca1206be
MG
4948 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4949 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
8e6cdc90 4950 && wi::to_wide (@1) != 0
ca1206be 4951 && single_use (@2))
8e6cdc90
RS
4952 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4953 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4954 wi::max_value (prec, UNSIGNED)
4955 - wi::to_wide (@1)); })))))
ca1206be 4956
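Sketch with CST = 42: the lt row maps to out = gt, giving the familiar wrap-around test (names are illustrative):

#include <limits.h>

static int naive (unsigned x)      { return x + 42 < x; }
static int simplified (unsigned x) { return x > UINT_MAX - 42; }

int main (void) { return naive (UINT_MAX) != simplified (UINT_MAX); }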
3563f78f
MG
4957/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4958 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4959 expects the long form, so we restrict the transformation for now. */
4960(for cmp (gt le)
4961 (simplify
a8e9f9a3 4962 (cmp:c (minus@2 @0 @1) @0)
3563f78f
MG
4963 (if (single_use (@2)
4964 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
ff336801 4965 && TYPE_UNSIGNED (TREE_TYPE (@0)))
3563f78f 4966 (cmp @1 @0))))
3563f78f 4967
ff336801
JJ
4968/* Optimize A - B + -1 >= A into B >= A for unsigned comparisons. */
4969(for cmp (ge lt)
4970 (simplify
4971 (cmp:c (plus (minus @0 @1) integer_minus_onep) @0)
4972 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4973 && TYPE_UNSIGNED (TREE_TYPE (@0)))
4974 (cmp @1 @0))))
4975
3563f78f 4976/* Testing for overflow is unnecessary if we already know the result. */
3563f78f
MG
4977/* A - B > A */
4978(for cmp (gt le)
4979 out (ne eq)
4980 (simplify
a8e9f9a3 4981 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3563f78f
MG
4982 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4983 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4984 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4985/* A + B < A */
4986(for cmp (lt ge)
4987 out (ne eq)
4988 (simplify
a8e9f9a3 4989 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3563f78f
MG
4990 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4991 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4992 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4993
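Illustrative C: when the subtraction already goes through __builtin_sub_overflow, comparing its result against the first operand just recomputes the overflow flag, which the rule reuses:

#include <stdio.h>

static int naive (unsigned a, unsigned b)
{
  unsigned res;
  (void) __builtin_sub_overflow (a, b, &res);
  return res > a;          /* folds to the overflow flag itself */
}

int main (void)
{
  printf ("%d %d\n", naive (1, 2), naive (2, 1));   /* 1 0 */
  return 0;
}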
603aeb87 4994/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
0557293f 4995 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
0557293f
AM
4996(for cmp (lt ge)
4997 out (ne eq)
4998 (simplify
603aeb87 4999 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
0557293f
AM
5000 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
5001 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
5002 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
55cf3946 5003
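A sketch of the source form (assuming b != 0, since the naive test divides by it; function names are ad hoc):

#include <limits.h>

static int naive (unsigned a, unsigned b)
{
  return UINT_MAX / b < a;       /* the -1 / B < A shape above */
}

static int simplified (unsigned a, unsigned b)
{
  unsigned tmp;
  return __builtin_mul_overflow (a, b, &tmp);
}

int main (void)
{
  return naive (UINT_MAX / 2 + 1, 2) != simplified (UINT_MAX / 2 + 1, 2);
}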
6d938a5d
JJ
5004/* Similarly, for unsigned operands, (((type) A * B) >> prec) != 0 where type
5005 is at least twice as wide as the type of A and B, simplify to
5006 __builtin_mul_overflow (A, B, <unused>). */
5007(for cmp (eq ne)
5008 (simplify
5009 (cmp (rshift (mult:s (convert@3 @0) (convert @1)) INTEGER_CST@2)
5010 integer_zerop)
5011 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5012 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
5013 && TYPE_UNSIGNED (TREE_TYPE (@0))
5014 && (TYPE_PRECISION (TREE_TYPE (@3))
5015 >= 2 * TYPE_PRECISION (TREE_TYPE (@0)))
5016 && tree_fits_uhwi_p (@2)
5017 && tree_to_uhwi (@2) == TYPE_PRECISION (TREE_TYPE (@0))
5018 && types_match (@0, @1)
5019 && type_has_mode_precision_p (TREE_TYPE (@0))
5020 && (optab_handler (umulv4_optab, TYPE_MODE (TREE_TYPE (@0)))
5021 != CODE_FOR_nothing))
5022 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
5023 (cmp (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
5024
53f3cd25
RS
5025/* Simplification of math builtins. These rules must all be optimizations
5026 as well as IL simplifications. If there is a possibility that the new
5027 form could be a pessimization, the rule should go in the canonicalization
5028 section that follows this one.
e18c1d66 5029
53f3cd25
RS
5030 Rules can generally go in this section if they satisfy one of
5031 the following:
5032
5033 - the rule describes an identity
5034
5035 - the rule replaces calls with something as simple as addition or
5036 multiplication
5037
5038 - the rule contains unary calls only and simplifies the surrounding
5039 arithmetic. (The idea here is to exclude non-unary calls in which
5040 one operand is constant and in which the call is known to be cheap
5041 when the operand has that value.) */
52c6378a 5042
53f3cd25 5043(if (flag_unsafe_math_optimizations)
52c6378a
N
5044 /* Simplify sqrt(x) * sqrt(x) -> x. */
5045 (simplify
c6cfa2bf 5046 (mult (SQRT_ALL@1 @0) @1)
52c6378a
N
5047 (if (!HONOR_SNANS (type))
5048 @0))
5049
ed17cb57
JW
5050 (for op (plus minus)
5051 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
5052 (simplify
5053 (op (rdiv @0 @1)
5054 (rdiv @2 @1))
5055 (rdiv (op @0 @2) @1)))
5056
5e21d765
WD
5057 (for cmp (lt le gt ge)
5058 neg_cmp (gt ge lt le)
5059 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
5060 (simplify
5061 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
5062 (with
5063 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
5064 (if (tem
5065 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
5066 || (real_zerop (tem) && !real_zerop (@1))))
5067 (switch
5068 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
5069 (cmp @0 { tem; }))
5070 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
5071 (neg_cmp @0 { tem; })))))))
5072
35401640
N
5073 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
5074 (for root (SQRT CBRT)
5075 (simplify
5076 (mult (root:s @0) (root:s @1))
5077 (root (mult @0 @1))))
5078
35401640
N
5079 /* Simplify expN(x) * expN(y) -> expN(x+y). */
5080 (for exps (EXP EXP2 EXP10 POW10)
5081 (simplify
5082 (mult (exps:s @0) (exps:s @1))
5083 (exps (plus @0 @1))))
5084
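For example (illustrative; only valid under the -funsafe-math-optimizations guard above):

#include <math.h>

static double naive (double x, double y)      { return exp (x) * exp (y); }
static double simplified (double x, double y) { return exp (x + y); }

int main (void) { return naive (0.0, 0.0) != simplified (0.0, 0.0); }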
52c6378a 5085 /* Simplify a/root(b/c) into a*root(c/b). */
35401640
N
5086 (for root (SQRT CBRT)
5087 (simplify
5088 (rdiv @0 (root:s (rdiv:s @1 @2)))
5089 (mult @0 (root (rdiv @2 @1)))))
5090
5091 /* Simplify x/expN(y) into x*expN(-y). */
5092 (for exps (EXP EXP2 EXP10 POW10)
5093 (simplify
5094 (rdiv @0 (exps:s @1))
5095 (mult @0 (exps (negate @1)))))
52c6378a 5096
eee7b6c4
RB
5097 (for logs (LOG LOG2 LOG10 LOG10)
5098 exps (EXP EXP2 EXP10 POW10)
8acda9b2 5099 /* logN(expN(x)) -> x. */
e18c1d66
RB
5100 (simplify
5101 (logs (exps @0))
8acda9b2
RS
5102 @0)
5103 /* expN(logN(x)) -> x. */
5104 (simplify
5105 (exps (logs @0))
5106 @0))
53f3cd25 5107
e18c1d66
RB
5108 /* Optimize logN(func()) for various exponential functions. We
5109 want to determine the value "x" and the power "exponent" in
5110 order to transform logN(x**exponent) into exponent*logN(x). */
eee7b6c4
RB
5111 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
5112 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
e18c1d66
RB
5113 (simplify
5114 (logs (exps @0))
c9e926ce
RS
5115 (if (SCALAR_FLOAT_TYPE_P (type))
5116 (with {
5117 tree x;
5118 switch (exps)
5119 {
5120 CASE_CFN_EXP:
5121 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
5122 x = build_real_truncate (type, dconst_e ());
5123 break;
5124 CASE_CFN_EXP2:
5125 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
5126 x = build_real (type, dconst2);
5127 break;
5128 CASE_CFN_EXP10:
5129 CASE_CFN_POW10:
5130 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
5131 {
5132 REAL_VALUE_TYPE dconst10;
5133 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
5134 x = build_real (type, dconst10);
5135 }
5136 break;
5137 default:
5138 gcc_unreachable ();
5139 }
5140 }
5141 (mult (logs { x; }) @0)))))
53f3cd25 5142
e18c1d66
RB
5143 (for logs (LOG LOG
5144 LOG2 LOG2
5145 LOG10 LOG10)
5146 exps (SQRT CBRT)
5147 (simplify
5148 (logs (exps @0))
c9e926ce
RS
5149 (if (SCALAR_FLOAT_TYPE_P (type))
5150 (with {
5151 tree x;
5152 switch (exps)
5153 {
5154 CASE_CFN_SQRT:
5155 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
5156 x = build_real (type, dconsthalf);
5157 break;
5158 CASE_CFN_CBRT:
5159 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
5160 x = build_real_truncate (type, dconst_third ());
5161 break;
5162 default:
5163 gcc_unreachable ();
5164 }
5165 }
5166 (mult { x; } (logs @0))))))
53f3cd25
RS
5167
5168 /* logN(pow(x,exponent)) -> exponent*logN(x). */
e18c1d66
RB
5169 (for logs (LOG LOG2 LOG10)
5170 pows (POW)
5171 (simplify
5172 (logs (pows @0 @1))
53f3cd25
RS
5173 (mult @1 (logs @0))))
5174
848bb6fc
JJ
5175 /* pow(C,x) -> exp(log(C)*x) if C > 0,
5176 or if C is a positive power of 2,
5177 pow(C,x) -> exp2(log2(C)*x). */
30a2c10e 5178#if GIMPLE
e83fe013
WD
5179 (for pows (POW)
5180 exps (EXP)
5181 logs (LOG)
848bb6fc
JJ
5182 exp2s (EXP2)
5183 log2s (LOG2)
e83fe013
WD
5184 (simplify
5185 (pows REAL_CST@0 @1)
848bb6fc 5186 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
ef7866a3
JJ
5187 && real_isfinite (TREE_REAL_CST_PTR (@0))
5188 /* As libmvec doesn't have a vectorized exp2, defer optimizing
5189 the use_exp2 case until after vectorization. It actually seems
5190 beneficial to postpone this for all constants, because
5191 exp(log(C)*x), while faster, has worse precision, and if x also
5192 folds into a constant, the rewrite is an unnecessary
5193 pessimization. */
5194 && canonicalize_math_after_vectorization_p ())
848bb6fc
JJ
5195 (with {
5196 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
5197 bool use_exp2 = false;
bae974e6 5198 if (targetm.libc_has_function (function_c99_misc, TREE_TYPE (@0))
848bb6fc
JJ
5199 && value->cl == rvc_normal)
5200 {
5201 REAL_VALUE_TYPE frac_rvt = *value;
5202 SET_REAL_EXP (&frac_rvt, 1);
5203 if (real_equal (&frac_rvt, &dconst1))
5204 use_exp2 = true;
5205 }
5206 }
5207 (if (!use_exp2)
30a2c10e
JJ
5208 (if (optimize_pow_to_exp (@0, @1))
5209 (exps (mult (logs @0) @1)))
ef7866a3 5210 (exp2s (mult (log2s @0) @1)))))))
30a2c10e 5211#endif
e83fe013 5212
16ef0a8c
JJ
5213 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
5214 (for pows (POW)
5215 exps (EXP EXP2 EXP10 POW10)
5216 logs (LOG LOG2 LOG10 LOG10)
5217 (simplify
5218 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
5219 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
5220 && real_isfinite (TREE_REAL_CST_PTR (@0)))
5221 (exps (plus (mult (logs @0) @1) @2)))))
5222
53f3cd25
RS
5223 (for sqrts (SQRT)
5224 cbrts (CBRT)
b4838d77 5225 pows (POW)
53f3cd25
RS
5226 exps (EXP EXP2 EXP10 POW10)
5227 /* sqrt(expN(x)) -> expN(x*0.5). */
5228 (simplify
5229 (sqrts (exps @0))
5230 (exps (mult @0 { build_real (type, dconsthalf); })))
5231 /* cbrt(expN(x)) -> expN(x/3). */
5232 (simplify
5233 (cbrts (exps @0))
b4838d77
RS
5234 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
5235 /* pow(expN(x), y) -> expN(x*y). */
5236 (simplify
5237 (pows (exps @0) @1)
5238 (exps (mult @0 @1))))
cfed37a0
RS
5239
5240 /* tan(atan(x)) -> x. */
5241 (for tans (TAN)
5242 atans (ATAN)
5243 (simplify
5244 (tans (atans @0))
5245 @0)))
53f3cd25 5246
121ef08b
GB
5247 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
5248 (for sins (SIN)
5249 atans (ATAN)
5250 sqrts (SQRT)
5251 copysigns (COPYSIGN)
5252 (simplify
5253 (sins (atans:s @0))
5254 (with
5255 {
5256 REAL_VALUE_TYPE r_cst;
5257 build_sinatan_real (&r_cst, type);
5258 tree t_cst = build_real (type, r_cst);
5259 tree t_one = build_one_cst (type);
5260 }
5261 (if (SCALAR_FLOAT_TYPE_P (type))
5f054b17 5262 (cond (lt (abs @0) { t_cst; })
121ef08b
GB
5263 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
5264 (copysigns { t_one; } @0))))))
5265
5266/* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
5267 (for coss (COS)
5268 atans (ATAN)
5269 sqrts (SQRT)
5270 copysigns (COPYSIGN)
5271 (simplify
5272 (coss (atans:s @0))
5273 (with
5274 {
5275 REAL_VALUE_TYPE r_cst;
5276 build_sinatan_real (&r_cst, type);
5277 tree t_cst = build_real (type, r_cst);
5278 tree t_one = build_one_cst (type);
5279 tree t_zero = build_zero_cst (type);
5280 }
5281 (if (SCALAR_FLOAT_TYPE_P (type))
5f054b17 5282 (cond (lt (abs @0) { t_cst; })
121ef08b
GB
5283 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
5284 (copysigns { t_zero; } @0))))))
5285
4aff6d17
GB
5286 (if (!flag_errno_math)
5287 /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
5288 (for sinhs (SINH)
5289 atanhs (ATANH)
5290 sqrts (SQRT)
5291 (simplify
5292 (sinhs (atanhs:s @0))
5293 (with { tree t_one = build_one_cst (type); }
5294 (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
5295
5296 /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
5297 (for coshs (COSH)
5298 atanhs (ATANH)
5299 sqrts (SQRT)
5300 (simplify
5301 (coshs (atanhs:s @0))
5302 (with { tree t_one = build_one_cst (type); }
5303 (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
5304
abcc43f5
RS
5305/* cabs(x+0i) or cabs(0+xi) -> abs(x). */
5306(simplify
e04d2a35 5307 (CABS (complex:C @0 real_zerop@1))
abcc43f5
RS
5308 (abs @0))
5309
67dbe582 5310/* trunc(trunc(x)) -> trunc(x), etc. */
c6cfa2bf 5311(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
67dbe582
RS
5312 (simplify
5313 (fns (fns @0))
5314 (fns @0)))
5315/* f(x) -> x if x is integer valued and f does nothing for such values. */
c6cfa2bf 5316(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
67dbe582
RS
5317 (simplify
5318 (fns integer_valued_real_p@0)
5319 @0))
67dbe582 5320
4d7836c4
RS
5321/* hypot(x,0) and hypot(0,x) -> abs(x). */
5322(simplify
c9e926ce 5323 (HYPOT:c @0 real_zerop@1)
4d7836c4
RS
5324 (abs @0))
5325
b4838d77
RS
5326/* pow(1,x) -> 1. */
5327(simplify
5328 (POW real_onep@0 @1)
5329 @0)
5330
461e4145
RS
5331(simplify
5332 /* copysign(x,x) -> x. */
c6cfa2bf 5333 (COPYSIGN_ALL @0 @0)
461e4145
RS
5334 @0)
5335
bb5e8952
JJ
5336(simplify
5337 /* copysign(x,-x) -> -x. */
5338 (COPYSIGN_ALL @0 (negate@1 @0))
5339 @1)
5340
461e4145
RS
5341(simplify
5342 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
c6cfa2bf 5343 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
461e4145
RS
5344 (abs @0))
5345
86c0733f
RS
5346(for scale (LDEXP SCALBN SCALBLN)
5347 /* ldexp(0, x) -> 0. */
5348 (simplify
5349 (scale real_zerop@0 @1)
5350 @0)
5351 /* ldexp(x, 0) -> x. */
5352 (simplify
5353 (scale @0 integer_zerop@1)
5354 @0)
5355 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
5356 (simplify
5357 (scale REAL_CST@0 @1)
5358 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
5359 @0)))
5360
53f3cd25
RS
5361/* Canonicalization of sequences of math builtins. These rules represent
5362 IL simplifications but are not necessarily optimizations.
5363
5364 The sincos pass is responsible for picking "optimal" implementations
5365 of math builtins, which may be more complicated and can sometimes go
5366 the other way, e.g. converting pow into a sequence of sqrts.
5367 We only want to do these canonicalizations before the pass has run. */
5368
5369(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
5370 /* Simplify tan(x) * cos(x) -> sin(x). */
5371 (simplify
5372 (mult:c (TAN:s @0) (COS:s @0))
5373 (SIN @0))
5374
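Illustrative C: this fires only under the unsafe-math guard above and, per the section comment, only before the sincos pass has run:

#include <math.h>

static double naive (double x)     { return tan (x) * cos (x); }
static double canonical (double x) { return sin (x); }

int main (void) { return naive (0.0) != canonical (0.0); }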
5375 /* Simplify x * pow(x,c) -> pow(x,c+1). */
5376 (simplify
de3fbea3 5377 (mult:c @0 (POW:s @0 REAL_CST@1))
53f3cd25
RS
5378 (if (!TREE_OVERFLOW (@1))
5379 (POW @0 (plus @1 { build_one_cst (type); }))))
5380
5381 /* Simplify sin(x) / cos(x) -> tan(x). */
5382 (simplify
5383 (rdiv (SIN:s @0) (COS:s @0))
5384 (TAN @0))
5385
2066f795
RT
5386 /* Simplify sinh(x) / cosh(x) -> tanh(x). */
5387 (simplify
5388 (rdiv (SINH:s @0) (COSH:s @0))
5389 (TANH @0))
5390
29e304fd
VG
5391 /* Simplify tanh (x) / sinh (x) -> 1.0 / cosh (x). */
5392 (simplify
5393 (rdiv (TANH:s @0) (SINH:s @0))
5394 (rdiv {build_one_cst (type);} (COSH @0)))
5395
53f3cd25
RS
5396 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
5397 (simplify
5398 (rdiv (COS:s @0) (SIN:s @0))
5399 (rdiv { build_one_cst (type); } (TAN @0)))
5400
5401 /* Simplify sin(x) / tan(x) -> cos(x). */
5402 (simplify
5403 (rdiv (SIN:s @0) (TAN:s @0))
5404 (if (! HONOR_NANS (@0)
5405 && ! HONOR_INFINITIES (@0))
c9e926ce 5406 (COS @0)))
53f3cd25
RS
5407
5408 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
5409 (simplify
5410 (rdiv (TAN:s @0) (SIN:s @0))
5411 (if (! HONOR_NANS (@0)
5412 && ! HONOR_INFINITIES (@0))
5413 (rdiv { build_one_cst (type); } (COS @0))))
5414
5415 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
5416 (simplify
5417 (mult (POW:s @0 @1) (POW:s @0 @2))
5418 (POW @0 (plus @1 @2)))
5419
5420 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
5421 (simplify
5422 (mult (POW:s @0 @1) (POW:s @2 @1))
5423 (POW (mult @0 @2) @1))
5424
de3fbea3
RB
5425 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
5426 (simplify
5427 (mult (POWI:s @0 @1) (POWI:s @2 @1))
5428 (POWI (mult @0 @2) @1))
5429
53f3cd25
RS
5430 /* Simplify pow(x,c) / x -> pow(x,c-1). */
5431 (simplify
5432 (rdiv (POW:s @0 REAL_CST@1) @0)
5433 (if (!TREE_OVERFLOW (@1))
5434 (POW @0 (minus @1 { build_one_cst (type); }))))
5435
5436 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
5437 (simplify
5438 (rdiv @0 (POW:s @1 @2))
5439 (mult @0 (POW @1 (negate @2))))
5440
5441 (for sqrts (SQRT)
5442 cbrts (CBRT)
5443 pows (POW)
5444 /* sqrt(sqrt(x)) -> pow(x,1/4). */
5445 (simplify
5446 (sqrts (sqrts @0))
5447 (pows @0 { build_real (type, dconst_quarter ()); }))
5448 /* sqrt(cbrt(x)) -> pow(x,1/6). */
5449 (simplify
5450 (sqrts (cbrts @0))
5451 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
5452 /* cbrt(sqrt(x)) -> pow(x,1/6). */
5453 (simplify
5454 (cbrts (sqrts @0))
5455 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
5456 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
5457 (simplify
5458 (cbrts (cbrts tree_expr_nonnegative_p@0))
5459 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
5460 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
5461 (simplify
5462 (sqrts (pows @0 @1))
5463 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
5464 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
5465 (simplify
5466 (cbrts (pows tree_expr_nonnegative_p@0 @1))
b4838d77
RS
5467 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
5468 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
5469 (simplify
5470 (pows (sqrts @0) @1)
5471 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
5472 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
5473 (simplify
5474 (pows (cbrts tree_expr_nonnegative_p@0) @1)
5475 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
5476 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
5477 (simplify
5478 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
5479 (pows @0 (mult @1 @2))))
abcc43f5
RS
5480
5481 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
5482 (simplify
5483 (CABS (complex @0 @0))
96285749
RS
5484 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
5485
4d7836c4
RS
5486 /* hypot(x,x) -> fabs(x)*sqrt(2). */
5487 (simplify
5488 (HYPOT @0 @0)
5489 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
5490
96285749
RS
5491 /* cexp(x+yi) -> exp(x)*cexpi(y). */
5492 (for cexps (CEXP)
5493 exps (EXP)
5494 cexpis (CEXPI)
5495 (simplify
5496 (cexps compositional_complex@0)
bae974e6 5497 (if (targetm.libc_has_function (function_c99_math_complex, TREE_TYPE (@0)))
96285749
RS
5498 (complex
5499 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
5500 (mult @1 (imagpart @2)))))))
e18c1d66 5501
67dbe582
RS
5502(if (canonicalize_math_p ())
5503 /* floor(x) -> trunc(x) if x is nonnegative. */
c6cfa2bf
MM
5504 (for floors (FLOOR_ALL)
5505 truncs (TRUNC_ALL)
67dbe582
RS
5506 (simplify
5507 (floors tree_expr_nonnegative_p@0)
5508 (truncs @0))))
5509
5510(match double_value_p
5511 @0
5512 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
5513(for froms (BUILT_IN_TRUNCL
5514 BUILT_IN_FLOORL
5515 BUILT_IN_CEILL
5516 BUILT_IN_ROUNDL
5517 BUILT_IN_NEARBYINTL
5518 BUILT_IN_RINTL)
5519 tos (BUILT_IN_TRUNC
5520 BUILT_IN_FLOOR
5521 BUILT_IN_CEIL
5522 BUILT_IN_ROUND
5523 BUILT_IN_NEARBYINT
5524 BUILT_IN_RINT)
5525 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
5526 (if (optimize && canonicalize_math_p ())
5527 (simplify
5528 (froms (convert double_value_p@0))
5529 (convert (tos @0)))))
5530
5531(match float_value_p
5532 @0
5533 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
5534(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
5535 BUILT_IN_FLOORL BUILT_IN_FLOOR
5536 BUILT_IN_CEILL BUILT_IN_CEIL
5537 BUILT_IN_ROUNDL BUILT_IN_ROUND
5538 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
5539 BUILT_IN_RINTL BUILT_IN_RINT)
5540 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
5541 BUILT_IN_FLOORF BUILT_IN_FLOORF
5542 BUILT_IN_CEILF BUILT_IN_CEILF
5543 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
5544 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
5545 BUILT_IN_RINTF BUILT_IN_RINTF)
5546 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
5547 if x is a float. */
5dac7dbd 5548 (if (optimize && canonicalize_math_p ()
bae974e6 5549 && targetm.libc_has_function (function_c99_misc, NULL_TREE))
67dbe582
RS
5550 (simplify
5551 (froms (convert float_value_p@0))
5552 (convert (tos @0)))))
5553
543a9bcd
RS
5554(for froms (XFLOORL XCEILL XROUNDL XRINTL)
5555 tos (XFLOOR XCEIL XROUND XRINT)
5556 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
5557 (if (optimize && canonicalize_math_p ())
5558 (simplify
5559 (froms (convert double_value_p@0))
5560 (tos @0))))
5561
5562(for froms (XFLOORL XCEILL XROUNDL XRINTL
5563 XFLOOR XCEIL XROUND XRINT)
5564 tos (XFLOORF XCEILF XROUNDF XRINTF)
5565 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
5566 if x is a float. */
5567 (if (optimize && canonicalize_math_p ())
5568 (simplify
5569 (froms (convert float_value_p@0))
5570 (tos @0))))
5571
5572(if (canonicalize_math_p ())
5573 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
5574 (for floors (IFLOOR LFLOOR LLFLOOR)
5575 (simplify
5576 (floors tree_expr_nonnegative_p@0)
5577 (fix_trunc @0))))
5578
5579(if (canonicalize_math_p ())
5580 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
5581 (for fns (IFLOOR LFLOOR LLFLOOR
5582 ICEIL LCEIL LLCEIL
5583 IROUND LROUND LLROUND)
5584 (simplify
5585 (fns integer_valued_real_p@0)
5586 (fix_trunc @0)))
5587 (if (!flag_errno_math)
5588 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
5589 (for rints (IRINT LRINT LLRINT)
5590 (simplify
5591 (rints integer_valued_real_p@0)
5592 (fix_trunc @0)))))
5593
5594(if (canonicalize_math_p ())
5595 (for ifn (IFLOOR ICEIL IROUND IRINT)
5596 lfn (LFLOOR LCEIL LROUND LRINT)
5597 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
5598 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
5599 sizeof (int) == sizeof (long). */
5600 (if (TYPE_PRECISION (integer_type_node)
5601 == TYPE_PRECISION (long_integer_type_node))
5602 (simplify
5603 (ifn @0)
5604 (lfn:long_integer_type_node @0)))
5605 /* Canonicalize llround (x) to lround (x) on LP64 targets where
5606 sizeof (long long) == sizeof (long). */
5607 (if (TYPE_PRECISION (long_long_integer_type_node)
5608 == TYPE_PRECISION (long_integer_type_node))
5609 (simplify
5610 (llfn @0)
5611 (lfn:long_integer_type_node @0)))))
5612
92c52eab
RS
5613/* cproj(x) -> x if we're ignoring infinities. */
5614(simplify
5615 (CPROJ @0)
5616 (if (!HONOR_INFINITIES (type))
5617 @0))
5618
4534c203
RB
5619/* If the real part is inf and the imag part is known to be
5620 nonnegative, return (inf + 0i). */
5621(simplify
5622 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
5623 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
92c52eab
RS
5624 { build_complex_inf (type, false); }))
5625
4534c203
RB
5626/* If the imag part is inf, return (inf+I*copysign(0,imag)). */
5627(simplify
5628 (CPROJ (complex @0 REAL_CST@1))
5629 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
92c52eab 5630 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4534c203 5631
b4838d77
RS
5632(for pows (POW)
5633 sqrts (SQRT)
5634 cbrts (CBRT)
5635 (simplify
5636 (pows @0 REAL_CST@1)
5637 (with {
5638 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
5639 REAL_VALUE_TYPE tmp;
5640 }
5641 (switch
5642 /* pow(x,0) -> 1. */
5643 (if (real_equal (value, &dconst0))
5644 { build_real (type, dconst1); })
5645 /* pow(x,1) -> x. */
5646 (if (real_equal (value, &dconst1))
5647 @0)
5648 /* pow(x,-1) -> 1/x. */
5649 (if (real_equal (value, &dconstm1))
5650 (rdiv { build_real (type, dconst1); } @0))
5651 /* pow(x,0.5) -> sqrt(x). */
5652 (if (flag_unsafe_math_optimizations
5653 && canonicalize_math_p ()
5654 && real_equal (value, &dconsthalf))
5655 (sqrts @0))
5656 /* pow(x,1/3) -> cbrt(x). */
5657 (if (flag_unsafe_math_optimizations
5658 && canonicalize_math_p ()
5659 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
5660 real_equal (value, &tmp)))
5661 (cbrts @0))))))
4534c203 5662
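A compact C sketch of the switch above (the 0.5 and 1/3 cases additionally need the unsafe-math guard, as the conditions show; function names are ad hoc):

#include <math.h>

static double p0 (double x)  { return pow (x, 0.0); }   /* -> 1.0 */
static double p1 (double x)  { return pow (x, 1.0); }   /* -> x */
static double pm1 (double x) { return pow (x, -1.0); }  /* -> 1.0 / x */
static double ph (double x)  { return pow (x, 0.5); }   /* -> sqrt (x) */

int main (void) { return p0 (2.0) + p1 (2.0) + pm1 (2.0) + ph (4.0) != 5.5; }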
5ddc84ca
RS
5663/* powi(1,x) -> 1. */
5664(simplify
5665 (POWI real_onep@0 @1)
5666 @0)
5667
5668(simplify
5669 (POWI @0 INTEGER_CST@1)
5670 (switch
5671 /* powi(x,0) -> 1. */
8e6cdc90 5672 (if (wi::to_wide (@1) == 0)
5ddc84ca
RS
5673 { build_real (type, dconst1); })
5674 /* powi(x,1) -> x. */
8e6cdc90 5675 (if (wi::to_wide (@1) == 1)
5ddc84ca
RS
5676 @0)
5677 /* powi(x,-1) -> 1/x. */
8e6cdc90 5678 (if (wi::to_wide (@1) == -1)
5ddc84ca
RS
5679 (rdiv { build_real (type, dconst1); } @0))))
5680
03cc70b5 5681/* Narrowing of arithmetic and logical operations.
be144838
JL
5682
5683 These are conceptually similar to the transformations performed for
5684 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
5685 term we want to move all that code out of the front-ends into here. */
5686
8f5331b2
TC
5687/* Convert (outertype)((innertype0)a+(innertype1)b)
5688 into ((newtype)a+(newtype)b) where newtype
5689 is the widest mode from all of these. */
5690(for op (plus minus mult rdiv)
5691 (simplify
5692 (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2)))
5693 /* If we have a narrowing conversion of an arithmetic operation where
5694 both operands are widening conversions from the same type as the outer
5695 narrowing conversion, then convert the innermost operands to a
5696 suitable unsigned type (to avoid introducing undefined behavior),
5697 perform the operation and convert the result to the desired type. */
5698 (if (INTEGRAL_TYPE_P (type)
5699 && op != MULT_EXPR
5700 && op != RDIV_EXPR
5701 /* We check for type compatibility between @0 and @1 below,
5702 so there's no need to check that @2/@4 are integral types. */
5703 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
5704 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
5705 /* The precision of the type of each operand must match the
5706 precision of the mode of each operand, similarly for the
5707 result. */
5708 && type_has_mode_precision_p (TREE_TYPE (@1))
5709 && type_has_mode_precision_p (TREE_TYPE (@2))
5710 && type_has_mode_precision_p (type)
5711 /* The inner conversion must be a widening conversion. */
5712 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1))
5713 && types_match (@1, type)
5714 && (types_match (@1, @2)
5715 /* Or the second operand is const integer or converted const
5716 integer from valueize. */
5717 || TREE_CODE (@2) == INTEGER_CST))
5718 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
5719 (op @1 (convert @2))
5720 (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); }
5721 (convert (op (convert:utype @1)
5722 (convert:utype @2)))))
5723 (if (FLOAT_TYPE_P (type)
5724 && DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
5725 == DECIMAL_FLOAT_TYPE_P (type))
5726 (with { tree arg0 = strip_float_extensions (@1);
5727 tree arg1 = strip_float_extensions (@2);
5728 tree itype = TREE_TYPE (@0);
5729 tree ty1 = TREE_TYPE (arg0);
5730 tree ty2 = TREE_TYPE (arg1);
5731 enum tree_code code = TREE_CODE (itype); }
5732 (if (FLOAT_TYPE_P (ty1)
5733 && FLOAT_TYPE_P (ty2))
5734 (with { tree newtype = type;
5735 if (TYPE_MODE (ty1) == SDmode
5736 || TYPE_MODE (ty2) == SDmode
5737 || TYPE_MODE (type) == SDmode)
5738 newtype = dfloat32_type_node;
5739 if (TYPE_MODE (ty1) == DDmode
5740 || TYPE_MODE (ty2) == DDmode
5741 || TYPE_MODE (type) == DDmode)
5742 newtype = dfloat64_type_node;
5743 if (TYPE_MODE (ty1) == TDmode
5744 || TYPE_MODE (ty2) == TDmode
5745 || TYPE_MODE (type) == TDmode)
5746 newtype = dfloat128_type_node; }
5747 (if ((newtype == dfloat32_type_node
5748 || newtype == dfloat64_type_node
5749 || newtype == dfloat128_type_node)
5750 && newtype == type
5751 && types_match (newtype, type))
5752 (op (convert:newtype @1) (convert:newtype @2))
dc5b1191 5753 (with { if (TYPE_PRECISION (ty1) > TYPE_PRECISION (newtype))
8f5331b2
TC
5754 newtype = ty1;
5755 if (TYPE_PRECISION (ty2) > TYPE_PRECISION (newtype))
dc5b1191 5756 newtype = ty2; }
8f5331b2
TC
5757 /* Sometimes this transformation is safe (cannot
5758 change results through affecting double rounding
5759 cases) and sometimes it is not. If NEWTYPE is
5760 wider than TYPE, e.g. (float)((long double)double
5761 + (long double)double) converted to
5762 (float)(double + double), the transformation is
5763 unsafe regardless of the details of the types
5764 involved; double rounding can arise if the result
5765 of NEWTYPE arithmetic is a NEWTYPE value half way
5766 between two representable TYPE values but the
5767 exact value is sufficiently different (in the
5768 right direction) for this difference to be
5769 visible in ITYPE arithmetic. If NEWTYPE is the
5770 same as TYPE, however, the transformation may be
5771 safe depending on the types involved: it is safe
5772 if the ITYPE has strictly more than twice as many
5773 mantissa bits as TYPE, can represent infinities
5774 and NaNs if the TYPE can, and has sufficient
5775 exponent range for the product or ratio of two
5776 values representable in the TYPE to be within the
5777 range of normal values of ITYPE. */
5778 (if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
5779 && (flag_unsafe_math_optimizations
5780 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
5781 && real_can_shorten_arithmetic (TYPE_MODE (itype),
5782 TYPE_MODE (type))
5783 && !excess_precision_type (newtype)))
5784 && !types_match (itype, newtype))
5785 (convert:type (op (convert:newtype @1)
5786 (convert:newtype @2)))
5787 )))) )
5788 ))
5789)))
48451e8f
JL
5790
5791/* This is another case of narrowing, specifically when there's an outer
5792 BIT_AND_EXPR which masks off bits outside the type of the innermost
5793 operands. Like the previous case we have to convert the operands
9c582551 5794 to unsigned types to avoid introducing undefined behavior for the
48451e8f
JL
5795 arithmetic operation. */
5796(for op (minus plus)
8fdc6c67
RB
5797 (simplify
5798 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
5799 (if (INTEGRAL_TYPE_P (type)
5800 /* We check for type compatibility between @0 and @1 below,
5801 so there's no need to check that @1/@3 are integral types. */
5802 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
5803 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
5804 /* The precision of the type of each operand must match the
5805 precision of the mode of each operand, similarly for the
5806 result. */
2be65d9e
RS
5807 && type_has_mode_precision_p (TREE_TYPE (@0))
5808 && type_has_mode_precision_p (TREE_TYPE (@1))
5809 && type_has_mode_precision_p (type)
8fdc6c67
RB
5810 /* The inner conversion must be a widening conversion. */
5811 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
5812 && types_match (@0, @1)
5813 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
5814 <= TYPE_PRECISION (TREE_TYPE (@0)))
8e6cdc90
RS
5815 && (wi::to_wide (@4)
5816 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
5817 true, TYPE_PRECISION (type))) == 0)
8fdc6c67
RB
5818 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
5819 (with { tree ntype = TREE_TYPE (@0); }
5820 (convert (bit_and (op @0 @1) (convert:ntype @4))))
5821 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
5822 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
5823 (convert:utype @4))))))))
4f7a5692 5824
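A hedged C sketch: a and b promote to int for the subtraction, but the mask keeps only bits that fit the original 16-bit precision, so per the rule the minus can be done in the unsigned narrow type:

static int narrow (unsigned short a, unsigned short b)
{
  return (a - b) & 0xff;   /* the AND discards every widened bit */
}

int main (void) { return narrow (1, 2) != 0xff; }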
03cc70b5 5825/* Transform (@0 < @1 and @0 < @2) to use min,
4f7a5692 5826 (@0 > @1 and @0 > @2) to use max */
dac920e8
MG
5827(for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
5828 op (lt le gt ge lt le gt ge )
5829 ext (min min max max max max min min )
4f7a5692 5830 (simplify
dac920e8 5831 (logic (op:cs @0 @1) (op:cs @0 @2))
4618c453
RB
5832 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5833 && TREE_CODE (@0) != INTEGER_CST)
4f7a5692
MC
5834 (op @0 (ext @1 @2)))))
5835
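Sketch: once && has been flattened into bit_and of the two compares, the first column applies (names are illustrative):

static int naive (int a, int b, int c)
{
  return a < b && a < c;
}

static int simplified (int a, int b, int c)
{
  return a < (b < c ? b : c);      /* a < MIN (b, c) */
}

int main (void) { return naive (1, 2, 3) != simplified (1, 2, 3); }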
7317ef4a
RS
5836(simplify
5837 /* signbit(x) -> 0 if x is nonnegative. */
5838 (SIGNBIT tree_expr_nonnegative_p@0)
5839 { integer_zero_node; })
5840
5841(simplify
5842 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
5843 (SIGNBIT @0)
5844 (if (!HONOR_SIGNED_ZEROS (@0))
5845 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
a8b85ce9
MG
5846
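Illustrative C: the first rule covers arguments known nonnegative, and the second needs -fno-signed-zeros because signbit (-0.0) is 1 while -0.0 < 0 is false:

#include <math.h>

static int always_zero (double x) { return signbit (fabs (x)); }  /* -> 0 */
static int sign_test (double x)   { return signbit (x); }   /* -> x < 0.0 */

int main (void) { return always_zero (-3.0) || !sign_test (-3.0); }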
5847/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
5848(for cmp (eq ne)
5849 (for op (plus minus)
5850 rop (minus plus)
5851 (simplify
5852 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5853 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5854 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
5855 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
5856 && !TYPE_SATURATING (TREE_TYPE (@0)))
5857 (with { tree res = int_const_binop (rop, @2, @1); }
75473a91
RB
5858 (if (TREE_OVERFLOW (res)
5859 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
a8b85ce9
MG
5860 { constant_boolean_node (cmp == NE_EXPR, type); }
5861 (if (single_use (@3))
11c1e63c
JJ
5862 (cmp @0 { TREE_OVERFLOW (res)
5863 ? drop_tree_overflow (res) : res; }))))))))
a8b85ce9
MG
5864(for cmp (lt le gt ge)
5865 (for op (plus minus)
5866 rop (minus plus)
5867 (simplify
5868 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5869 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5870 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5871 (with { tree res = int_const_binop (rop, @2, @1); }
5872 (if (TREE_OVERFLOW (res))
5873 {
5874 fold_overflow_warning (("assuming signed overflow does not occur "
5875 "when simplifying conditional to constant"),
5876 WARN_STRICT_OVERFLOW_CONDITIONAL);
5877 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
5878 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
8e6cdc90
RS
5879 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
5880 TYPE_SIGN (TREE_TYPE (@1)))
a8b85ce9
MG
5881 != (op == MINUS_EXPR);
5882 constant_boolean_node (less == ovf_high, type);
5883 }
5884 (if (single_use (@3))
5885 (with
5886 {
5887 fold_overflow_warning (("assuming signed overflow does not occur "
5888 "when changing X +- C1 cmp C2 to "
5889 "X cmp C2 -+ C1"),
5890 WARN_STRICT_OVERFLOW_COMPARISON);
5891 }
5892 (cmp @0 { res; })))))))))
d3e40b76
RB
5893
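For example (a sketch; applies when overflow is neither sanitized, trapping, nor saturating, per the guard above):

static int naive (int x)      { return x + 10 == 20; }
static int simplified (int x) { return x == 10; }

int main (void) { return naive (10) != simplified (10); }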
5894/* Canonicalizations of BIT_FIELD_REFs. */
5895
6ec96dcb
RB
5896(simplify
5897 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
5898 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
5899
5900(simplify
5901 (BIT_FIELD_REF (view_convert @0) @1 @2)
5902 (BIT_FIELD_REF @0 @1 @2))
5903
5904(simplify
5905 (BIT_FIELD_REF @0 @1 integer_zerop)
5906 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
5907 (view_convert @0)))
5908
(simplify
 (BIT_FIELD_REF @0 @1 @2)
 (switch
  (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
       && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
   (switch
    (if (integer_zerop (@2))
     (view_convert (realpart @0)))
    (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
     (view_convert (imagpart @0)))))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (type)
       /* On GIMPLE this should only apply to register arguments.  */
       && (! GIMPLE || is_gimple_reg (@0))
       /* A bit-field-ref that referenced the full argument can be stripped.  */
       && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
	    && integer_zerop (@2))
	   /* Low-parts can be reduced to integral conversions.
	      ??? The following doesn't work for PDP endian.  */
	   || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
	       /* Don't even think about BITS_BIG_ENDIAN.  */
	       && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
	       && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
	       && compare_tree_int (@2, (BYTES_BIG_ENDIAN
					 ? (TYPE_PRECISION (TREE_TYPE (@0))
					    - TYPE_PRECISION (type))
					 : 0)) == 0)))
   (convert @0))))

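/* Illustrative examples for the above: on a complex double c,
   BIT_FIELD_REF <c, 64, 0> becomes the real part of c; on a
   little-endian target, the low-part extract BIT_FIELD_REF <x, 8, 0>
   of a 32-bit register value x becomes a simple integral conversion
   of x to the 8-bit result type.  */
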
/* Simplify vector extracts.  */

(simplify
 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
      && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
	  || (VECTOR_TYPE_P (type)
	      && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
  (with
   {
     tree ctor = (TREE_CODE (@0) == SSA_NAME
		  ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
     tree eltype = TREE_TYPE (TREE_TYPE (ctor));
     unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
     unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
     unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
   }
   (if (n != 0
	&& (idx % width) == 0
	&& (n % width) == 0
	&& known_le ((idx + n) / width,
		     TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
    (with
     {
       idx = idx / width;
       n = n / width;
       /* Constructor elements can be subvectors.  */
       poly_uint64 k = 1;
       if (CONSTRUCTOR_NELTS (ctor) != 0)
	 {
	   tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
	   if (TREE_CODE (cons_elem) == VECTOR_TYPE)
	     k = TYPE_VECTOR_SUBPARTS (cons_elem);
	 }
       unsigned HOST_WIDE_INT elt, count, const_k;
     }
     (switch
      /* We keep an exact subset of the constructor elements.  */
      (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
       (if (CONSTRUCTOR_NELTS (ctor) == 0)
	{ build_constructor (type, NULL); }
	(if (count == 1)
	 (if (elt < CONSTRUCTOR_NELTS (ctor))
	  (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
	  { build_zero_cst (type); })
	 /* We don't want to emit new CTORs unless the old one goes away.
	    ??? Eventually allow this if the CTOR ends up constant or
	    uniform.  */
	 (if (single_use (@0))
	  {
	    vec<constructor_elt, va_gc> *vals;
	    vec_alloc (vals, count);
	    for (unsigned i = 0;
		 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
	      CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
				      CONSTRUCTOR_ELT (ctor, elt + i)->value);
	    build_constructor (type, vals);
	  }))))
      /* The bitfield references a single constructor element.  */
      (if (k.is_constant (&const_k)
	   && idx + n <= (idx / const_k + 1) * const_k)
       (switch
	(if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
	 { build_zero_cst (type); })
	(if (n == const_k)
	 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
	(BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
	 @1 { bitsize_int ((idx % const_k) * width); })))))))))
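
/* Illustrative example: with v = {a, b, c, d} built by a CONSTRUCTOR of
   32-bit elements, BIT_FIELD_REF <v, 32, 64> (a 32-bit extract at bit
   offset 64, i.e. lane 2) simplifies to c directly.  */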

/* Simplify a bit extraction from a bit insertion for the cases with
   the inserted element fully covering the extraction or the insertion
   not touching the extraction.  */
(simplify
 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
 (with
  {
    unsigned HOST_WIDE_INT isize;
    if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
      isize = TYPE_PRECISION (TREE_TYPE (@1));
    else
      isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
  }
  (switch
   (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
	&& wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
		      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
						 wi::to_wide (@rpos)
						 - wi::to_wide (@ipos)); }))
   (if (wi::geu_p (wi::to_wide (@ipos),
		   wi::to_wide (@rpos) + wi::to_wide (@rsize))
	|| wi::geu_p (wi::to_wide (@rpos),
		      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @0 @rsize @rpos)))))
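
/* Illustrative example: after inserting a 32-bit value at bit 32 of a
   vector, extracting bits [32, 64) reads the inserted value itself
   (first arm), while extracting bits [0, 32) does not touch the
   insertion and reads from the original vector (second arm).  */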

(if (canonicalize_math_after_vectorization_p ())
 (for fmas (FMA)
  (simplify
   (fmas:c (negate @0) @1 @2)
   (IFN_FNMA @0 @1 @2))
  (simplify
   (fmas @0 @1 (negate @2))
   (IFN_FMS @0 @1 @2))
  (simplify
   (fmas:c (negate @0) @1 (negate @2))
   (IFN_FNMS @0 @1 @2))
  (simplify
   (negate (fmas@3 @0 @1 @2))
   (if (single_use (@3))
    (IFN_FNMS @0 @1 @2))))

 (simplify
  (IFN_FMS:c (negate @0) @1 @2)
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FMS @0 @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FMS:c (negate @0) @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (negate (IFN_FMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FNMA @0 @1 @2)))

 (simplify
  (IFN_FNMA:c (negate @0) @1 @2)
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FNMA @0 @1 (negate @2))
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FNMA:c (negate @0) @1 (negate @2))
  (IFN_FMS @0 @1 @2))
 (simplify
  (negate (IFN_FNMA@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMS @0 @1 @2)))

 (simplify
  (IFN_FNMS:c (negate @0) @1 @2)
  (IFN_FMS @0 @1 @2))
 (simplify
  (IFN_FNMS @0 @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (IFN_FNMS:c (negate @0) @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (negate (IFN_FNMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMA @0 @1 @2))))
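
/* Editorial note on the sign conventions assumed above, in C terms:

     fma  (a, b, c) = a*b + c	fms  (a, b, c) = a*b - c
     fnma (a, b, c) = -a*b + c	fnms (a, b, c) = -a*b - c

   so e.g. FMA (-a, b, c) rewrites to FNMA (a, b, c) and
   -(FMA (a, b, c)) to FNMS (a, b, c).  */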

/* POPCOUNT simplifications.  */
/* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero.  */
(simplify
 (plus (POPCOUNT:s @0) (POPCOUNT:s @1))
 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
  (POPCOUNT (bit_ior @0 @1))))

/* popcount(X) == 0 is X == 0, and related (in)equalities.  */
(for popcount (POPCOUNT)
 (for cmp (le eq ne gt)
      rep (eq eq ne ne)
  (simplify
   (cmp (popcount @0) integer_zerop)
   (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
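
/* Editorial note: popcount is always nonnegative, which is what makes
   the le/gt rows sound: popcount(X) <= 0 can only hold as
   popcount(X) == 0, and popcount(X) > 0 is just popcount(X) != 0,
   i.e. X != 0.  */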

/* Canonicalize POPCOUNT(x)&1 as PARITY(X).  */
(simplify
 (bit_and (POPCOUNT @0) integer_onep)
 (PARITY @0))

/* PARITY simplifications.  */
/* parity(~X) is parity(X).  */
(simplify
 (PARITY (bit_not @0))
 (PARITY @0))

/* parity(X)^parity(Y) is parity(X^Y).  */
(simplify
 (bit_xor (PARITY:s @0) (PARITY:s @1))
 (PARITY (bit_xor @0 @1)))

/* Common POPCOUNT/PARITY simplifications.  */
/* popcount(X&C1) is (X>>C2)&1 when C1 == 1<<C2.  Same for parity(X&C1).  */
(for pfun (POPCOUNT PARITY)
 (simplify
  (pfun @0)
  (with { wide_int nz = tree_nonzero_bits (@0); }
   (switch
    (if (nz == 1)
     (convert @0))
    (if (wi::popcount (nz) == 1)
     (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
      (convert (rshift:utype (convert:utype @0)
			     { build_int_cst (integer_type_node,
					      wi::ctz (nz)); }))))))))

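/* Illustrative example: if value tracking shows only bit 4 of x can be
   set (nonzero bits 0x10), popcount (x) and parity (x) both reduce to
   x >> 4, which is already 0 or 1, so no masking is needed.  */
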
#if GIMPLE
/* 64- and 32-bit branchless implementations of popcount are detected:

   int popcount64c (uint64_t x)
   {
     x -= (x >> 1) & 0x5555555555555555ULL;
     x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
     x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
     return (x * 0x0101010101010101ULL) >> 56;
   }

   int popcount32c (uint32_t x)
   {
     x -= (x >> 1) & 0x55555555;
     x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
     x = (x + (x >> 4)) & 0x0f0f0f0f;
     return (x * 0x01010101) >> 24;
   } */
(simplify
 (rshift
  (mult
   (bit_and
    (plus:c
     (rshift @8 INTEGER_CST@5)
     (plus:c@8
      (bit_and @6 INTEGER_CST@7)
      (bit_and
       (rshift
	(minus@6 @0
	 (bit_and (rshift @0 INTEGER_CST@4) INTEGER_CST@11))
	INTEGER_CST@10)
       INTEGER_CST@9)))
    INTEGER_CST@3)
   INTEGER_CST@2)
  INTEGER_CST@1)
 /* Check constants and optab.  */
 (with { unsigned prec = TYPE_PRECISION (type);
	 int shift = (64 - prec) & 63;
	 unsigned HOST_WIDE_INT c1
	   = HOST_WIDE_INT_UC (0x0101010101010101) >> shift;
	 unsigned HOST_WIDE_INT c2
	   = HOST_WIDE_INT_UC (0x0F0F0F0F0F0F0F0F) >> shift;
	 unsigned HOST_WIDE_INT c3
	   = HOST_WIDE_INT_UC (0x3333333333333333) >> shift;
	 unsigned HOST_WIDE_INT c4
	   = HOST_WIDE_INT_UC (0x5555555555555555) >> shift;
       }
  (if (prec >= 16
       && prec <= 64
       && pow2p_hwi (prec)
       && TYPE_UNSIGNED (type)
       && integer_onep (@4)
       && wi::to_widest (@10) == 2
       && wi::to_widest (@5) == 4
       && wi::to_widest (@1) == prec - 8
       && tree_to_uhwi (@2) == c1
       && tree_to_uhwi (@3) == c2
       && tree_to_uhwi (@9) == c3
       && tree_to_uhwi (@7) == c3
       && tree_to_uhwi (@11) == c4
       && direct_internal_fn_supported_p (IFN_POPCOUNT, type,
					  OPTIMIZE_FOR_BOTH))
   (convert (IFN_POPCOUNT:type @0)))))
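
/* When the pattern fires, the whole shift-and-mask (SWAR) sequence
   above collapses to a single IFN_POPCOUNT call; on targets with a
   popcount instruction (e.g. x86-64 with -mpopcnt) that typically
   expands to one instruction.  */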

/* On many targets __builtin_ffs has to handle a possible zero
   argument.  If the argument is known to be always non-zero,
   __builtin_ctz + 1 should lead to better code.  */
(simplify
 (FFS tree_expr_nonzero_p@0)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && direct_internal_fn_supported_p (IFN_CTZ, TREE_TYPE (@0),
					 OPTIMIZE_FOR_SPEED))
  (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
   (plus (CTZ:type (convert:utype @0)) { build_one_cst (type); }))))
#endif

(for ffs (BUILT_IN_FFS BUILT_IN_FFSL BUILT_IN_FFSLL
	  BUILT_IN_FFSIMAX)
 /* __builtin_ffs (X) == 0 -> X == 0.
    __builtin_ffs (X) == 6 -> (X & 63) == 32.  */
 (for cmp (eq ne)
  (simplify
   (cmp (ffs@2 @0) INTEGER_CST@1)
   (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
    (switch
     (if (integer_zerop (@1))
      (cmp @0 { build_zero_cst (TREE_TYPE (@0)); }))
     (if (tree_int_cst_sgn (@1) < 0 || wi::to_widest (@1) > prec)
      { constant_boolean_node (cmp == NE_EXPR ? true : false, type); })
     (if (single_use (@2))
      (cmp (bit_and @0 { wide_int_to_tree (TREE_TYPE (@0),
					   wi::mask (tree_to_uhwi (@1),
						     false, prec)); })
	   { wide_int_to_tree (TREE_TYPE (@0),
			       wi::shifted_mask (tree_to_uhwi (@1) - 1, 1,
						 false, prec)); }))))))

 /* __builtin_ffs (X) > 6 -> X != 0 && (X & 63) == 0.  */
 (for cmp (gt le)
      cmp2 (ne eq)
      cmp3 (eq ne)
      bit_op (bit_and bit_ior)
  (simplify
   (cmp (ffs@2 @0) INTEGER_CST@1)
   (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
    (switch
     (if (integer_zerop (@1))
      (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))
     (if (tree_int_cst_sgn (@1) < 0)
      { constant_boolean_node (cmp == GT_EXPR ? true : false, type); })
     (if (wi::to_widest (@1) >= prec)
      { constant_boolean_node (cmp == GT_EXPR ? false : true, type); })
     (if (wi::to_widest (@1) == prec - 1)
      (cmp3 @0 { wide_int_to_tree (TREE_TYPE (@0),
				   wi::shifted_mask (prec - 1, 1,
						     false, prec)); }))
     (if (single_use (@2))
      (bit_op (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); })
	      (cmp3 (bit_and @0
			     { wide_int_to_tree (TREE_TYPE (@0),
						 wi::mask (tree_to_uhwi (@1),
							   false, prec)); })
		    { build_zero_cst (TREE_TYPE (@0)); }))))))))
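
/* Editorial note: __builtin_ffs (X) > 6 says the lowest set bit of X
   is bit 6 or higher, which is exactly X != 0 with bits 0..5 all
   clear, hence the X != 0 && (X & 63) == 0 form (and the inverted
   le/eq/ne/bit_ior form).  */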

/* Simplify:

     a = a1 op a2
     r = c ? a : b;

   to:

     r = c ? a1 op a2 : b;

   if the target can do it in one go.  This makes the operation conditional
   on c, so could drop potentially-trapping arithmetic, but that's a valid
   simplification if the result of the operation isn't needed.

   Avoid speculatively generating a stand-alone vector comparison
   on targets that might not support them.  Any target implementing
   conditional internal functions must support the same comparisons
   inside and outside a VEC_COND_EXPR.  */

#if GIMPLE
(for uncond_op (UNCOND_BINARY)
     cond_op (COND_BINARY)
 (simplify
  (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
  (with { tree op_type = TREE_TYPE (@4); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))

/* Same for ternary operations.  */
(for uncond_op (UNCOND_TERNARY)
     cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@5); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 @4
		   (view_convert:op_type @1)))))))
#endif

/* Detect cases in which a VEC_COND_EXPR effectively replaces the
   "else" value of an IFN_COND_*.  */
(for cond_op (COND_BINARY)
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@3); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (inverse_conditions_p (@0, @2)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))

/* Same for ternary operations.  */
(for cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
  (with { tree op_type = TREE_TYPE (@6); }
   (if (inverse_conditions_p (@0, @2)
	&& element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))

/* For pointers @0 and @2 and nonnegative constant offset @1, look for
   expressions like:

     A: (@0 + @1 < @2) | (@2 + @1 < @0)
     B: (@0 + @1 <= @2) | (@2 + @1 <= @0)

   If pointers are known not to wrap, B checks whether @1 bytes starting
   at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
   bytes.  A is more efficiently tested as:

     A: (sizetype) (@0 + @1 - @2) > @1 * 2

   The equivalent expression for B is given by replacing @1 with @1 - 1:

     B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2

   @0 and @2 can be swapped in both expressions without changing the result.

   The folds rely on sizetype's being unsigned (which is always true)
   and on its being the same width as the pointer (which we have to check).

   The fold replaces two pointer_plus expressions, two comparisons and
   an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
   the best case it's a saving of two operations.  The A fold retains one
   of the original pointer_pluses, so is a win even if both pointer_pluses
   are used elsewhere.  The B fold is a wash if both pointer_pluses are
   used elsewhere, since all we end up doing is replacing a comparison with
   a pointer_plus.  We do still apply the fold under those circumstances
   though, in case applying it to other conditions eventually makes one of the
   pointer_pluses dead.  */
(for ior (truth_orif truth_or bit_ior)
 (for cmp (le lt)
  (simplify
   (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
	(cmp:cs (pointer_plus@4 @2 @1) @0))
   (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
	&& TYPE_OVERFLOW_WRAPS (sizetype)
	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
    /* Calculate the rhs constant.  */
    (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
	    offset_int rhs = off * 2; }
     /* Always fails for negative values.  */
     (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
      /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
	 pick a canonical order.  This increases the chances of using the
	 same pointer_plus in multiple checks.  */
      (with { bool swap_p = tree_swap_operands_p (@0, @2);
	      tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
       (if (cmp == LT_EXPR)
	(gt (convert:sizetype
	     (pointer_diff:ssizetype { swap_p ? @4 : @3; }
				     { swap_p ? @0 : @2; }))
	    { rhs_tree; })
	(gt (convert:sizetype
	     (pointer_diff:ssizetype
	      (pointer_plus { swap_p ? @2 : @0; }
			    { wide_int_to_tree (sizetype, off); })
	      { swap_p ? @0 : @2; }))
	    { rhs_tree; })))))))))
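
/* Worked example (illustrative): with @1 == 16, form A is true exactly
   when |@0 - @2| > 16.  The folded form computes
   d = (sizetype) (@0 + 16 - @2) = (@0 - @2) + 16; the unsigned test
   d > 32 holds precisely when @0 - @2 > 16 or @0 - @2 < -16 (the
   latter wrapping to a huge unsigned value), so a single comparison
   replaces the IOR of two.  */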

/* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
   element of @1.  */
(for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
  (with { int i = single_nonzero_element (@1); }
   (if (i >= 0)
    (with { tree elt = vector_cst_elt (@1, i);
	    tree elt_type = TREE_TYPE (elt);
	    unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
	    tree size = bitsize_int (elt_bits);
	    tree pos = bitsize_int (elt_bits * i); }
     (view_convert
      (bit_and:elt_type
       (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
       { elt; })))))))
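
/* Illustrative example: with mask m = { 0, 0xff, 0, 0 },
   IFN_REDUC_PLUS (v & m) sums one nonzero lane and three zeros, so it
   folds to the scalar v[1] & 0xff.  */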

(simplify
 (vec_perm @0 @1 VECTOR_CST@2)
 (with
  {
    tree op0 = @0, op1 = @1, op2 = @2;

    /* Build a vector of integers from the tree mask.  */
    vec_perm_builder builder;
    if (!tree_to_vec_perm_builder (&builder, op2))
      return NULL_TREE;

    /* Create a vec_perm_indices for the integer vector.  */
    poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
    bool single_arg = (op0 == op1);
    vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
  }
  (if (sel.series_p (0, 1, 0, 1))
   { op0; }
   (if (sel.series_p (0, 1, nelts, 1))
    { op1; }
    (with
     {
       if (!single_arg)
	 {
	   if (sel.all_from_input_p (0))
	     op1 = op0;
	   else if (sel.all_from_input_p (1))
	     {
	       op0 = op1;
	       sel.rotate_inputs (1);
	     }
	   else if (known_ge (poly_uint64 (sel[0]), nelts))
	     {
	       std::swap (op0, op1);
	       sel.rotate_inputs (1);
	     }
	 }
       gassign *def;
       tree cop0 = op0, cop1 = op1;
       if (TREE_CODE (op0) == SSA_NAME
	   && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
	   && gimple_assign_rhs_code (def) == CONSTRUCTOR)
	 cop0 = gimple_assign_rhs1 (def);
       if (TREE_CODE (op1) == SSA_NAME
	   && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
	   && gimple_assign_rhs_code (def) == CONSTRUCTOR)
	 cop1 = gimple_assign_rhs1 (def);

       tree t;
    }
    (if ((TREE_CODE (cop0) == VECTOR_CST
	  || TREE_CODE (cop0) == CONSTRUCTOR)
	 && (TREE_CODE (cop1) == VECTOR_CST
	     || TREE_CODE (cop1) == CONSTRUCTOR)
	 && (t = fold_vec_perm (type, cop0, cop1, sel)))
     { t; }
     (with
      {
	bool changed = (op0 == op1 && !single_arg);
	tree ins = NULL_TREE;
	unsigned at = 0;

	/* See if the permutation is performing a single element
	   insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
	   in that case.  But only if the vector mode is supported,
	   otherwise this is invalid GIMPLE.  */
	if (TYPE_MODE (type) != BLKmode
	    && (TREE_CODE (cop0) == VECTOR_CST
		|| TREE_CODE (cop0) == CONSTRUCTOR
		|| TREE_CODE (cop1) == VECTOR_CST
		|| TREE_CODE (cop1) == CONSTRUCTOR))
	  {
	    bool insert_first_p = sel.series_p (1, 1, nelts + 1, 1);
	    if (insert_first_p)
	      {
		/* After canonicalizing the first elt to come from the
		   first vector we only can insert the first elt from
		   the first vector.  */
		at = 0;
		if ((ins = fold_read_from_vector (cop0, sel[0])))
		  op0 = op1;
	      }
	    /* The above can fail for two-element vectors which always
	       appear to insert the first element, so try inserting
	       into the second lane as well.  For more than two
	       elements that's wasted time.  */
	    if (!insert_first_p || (!ins && maybe_eq (nelts, 2u)))
	      {
		unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
		for (at = 0; at < encoded_nelts; ++at)
		  if (maybe_ne (sel[at], at))
		    break;
		if (at < encoded_nelts
		    && (known_eq (at + 1, nelts)
			|| sel.series_p (at + 1, 1, at + 1, 1)))
		  {
		    if (known_lt (poly_uint64 (sel[at]), nelts))
		      ins = fold_read_from_vector (cop0, sel[at]);
		    else
		      ins = fold_read_from_vector (cop1, sel[at] - nelts);
		  }
	      }
	  }

	/* Generate a canonical form of the selector.  */
	if (!ins && sel.encoding () != builder)
	  {
	    /* Some targets are deficient and fail to expand a single
	       argument permutation while still allowing an equivalent
	       2-argument version.  */
	    tree oldop2 = op2;
	    if (sel.ninputs () == 2
		|| can_vec_perm_const_p (TYPE_MODE (type), sel, false))
	      op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
	    else
	      {
		vec_perm_indices sel2 (builder, 2, nelts);
		if (can_vec_perm_const_p (TYPE_MODE (type), sel2, false))
		  op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
		else
		  /* Not directly supported with either encoding,
		     so use the preferred form.  */
		  op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
	      }
	    if (!operand_equal_p (op2, oldop2, 0))
	      changed = true;
	  }
      }
      (if (ins)
       (bit_insert { op0; } { ins; }
		   { bitsize_int (at * vector_element_bits (type)); })
       (if (changed)
	(vec_perm { op0; } { op1; } { op2; }))))))))))
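
/* Illustrative example: for four-lane vectors,
   VEC_PERM_EXPR <{9, 9, 9, 9}, v, {0, 5, 6, 7}> takes lane 0 from the
   constant and lanes 1-3 from v, so the single-element-insert logic
   above turns it into BIT_INSERT_EXPR <v, 9, 0>, inserting the scalar
   9 into lane 0 of v.  */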

/* VEC_PERM_EXPR (v, v, mask) -> v when every lane of v holds the same
   element.  */

(match vec_same_elem_p
 @0
 (if (uniform_vector_p (@0))))

(match vec_same_elem_p
 (vec_duplicate @0))

(simplify
 (vec_perm vec_same_elem_p@0 @0 @1)
 @0)

/* Match count trailing zeroes for simplify_count_trailing_zeroes in fwprop.
   The canonical form is array[((x & -x) * C) >> SHIFT] where C is a magic
   constant which, when multiplied by a power of 2, produces a unique value
   in the top 5 or 6 bits.  That value is then used to index a table which
   maps it to the number of trailing zeroes.  */
(match (ctz_table_index @1 @2 @3)
  (rshift (mult (bit_and:c (negate @1) @1) INTEGER_CST@2) INTEGER_CST@3))
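
/* A typical C source form this matches (illustrative only; the 32
   table entries are elided here):

   static const int table[32] = { ... };
   int ctz32 (unsigned x)
   {
     return table[((x & -x) * 0x077CB531u) >> 27];
   }

   (x & -x) isolates the lowest set bit, and multiplying by the
   de Bruijn constant 0x077CB531 leaves a value unique to that bit
   position in the top five bits.  */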