/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2018 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   HONOR_NANS)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)

/* As opposed to convert?, this still creates a single pattern, so
   it is not a suitable replacement for convert? in all cases.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
                   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others.  */
(match (nop_convert @0)
 @0)

/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
  (simplify
    (op @0 integer_zerop)
    (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 (simplify
  (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outp @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outp @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outp @0))))))
 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 (simplify
  (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outn @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outn @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outn @0)))))))

/* Transform X * copysign (1.0, X) into abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep @0))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
(simplify
 (COPYSIGN_ALL REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN_ALL (negate @0) @1)))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
  (simplify
    (op @0 integer_onep)
    (non_lvalue @0)))

/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / ( 1 << B)) !=  -1 >> B.  */
(simplify
 (trunc_div @0 (lshift integer_onep@1 @2))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
  (rshift @0 @2)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
         { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    bool overflow_p;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                            TYPE_SIGN (type), &overflow_p);
   }
   (if (!overflow_p)
    (div @0 { wide_int_to_tree (type, mul); })
    (if (TYPE_UNSIGNED (type)
         || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
     { build_zero_cst (type); })))))
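
/* For example, (X / 2) / 3 becomes X / 6 here.  For 32-bit int,
   (X / 65536) / 65536 folds to 0: the combined divisor 2^32 overflows,
   and any X divided by it yields zero (the sole exception, a product
   truncating to INT_MIN, is excluded above).  */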

/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   bool overflow_p;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                           TYPE_SIGN (type), &overflow_p);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
 (if (SCALAR_FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  (switch
   (if (types_match (type, float_type_node))
    (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
   (if (types_match (type, double_type_node))
    (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
   (if (types_match (type, long_double_type_node))
    (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C).  */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
           { build_int_cst (integer_type_node,
                            wi::exact_log2 (wi::to_wide (@2))); }))))

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))
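
/* For example, with -freciprocal-math, X / 5.0 becomes X * 0.2,
   trading the division for a multiplication at the cost of a rounding
   difference.  X / 4.0 has an exact inverse and becomes X * 0.25 via
   the exact_inverse path even without that flag.  */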

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
                             TYPE_SIGN (type)))
   { build_zero_cst (type); })))

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (wi::to_wide (@1))
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
         Y might be -1, because we would then change valid
         INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
          || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
                                                   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
        || tree_expr_nonnegative_p (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@3))
       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
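
/* For example, for unsigned X the above turns X % 8 into X & 7, and
   X % (4 << N) into X & ((4 << N) - 1), replacing the division with a
   cheap mask operation.  */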

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
                (type, wi::mask (TYPE_PRECISION (type)
                                 - wi::exact_log2 (wi::to_wide (@1)),
                                 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))
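
/* The overflow check above matters: with a wrapping unsigned type,
   (t * 2) / 2 must not fold to t, e.g. t = 0x80000001u gives
   (t * 2) / 2 == 1.  */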

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
 (if (! FLOAT_TYPE_P (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x).  */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
      && single_use (@1))
  (if (TYPE_UNSIGNED (type))
   (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
 (if (flag_associative_math
      && single_use (@3))
  (with
   { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
   (if (tem)
    (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_and @0 (convert @1)) @2))))
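
/* For instance, the first pattern rewrites x == 0 && y == 0 into the
   single test (x | (typeof x) y) == 0, so one comparison is evaluated
   instead of two.  */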

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
 (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
 (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
  (bit_xor @0 @1))
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* (a & ~b) | (a ^ b)  -->  a ^ b  */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a  -->  ~(a & b)  */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (a | b) & ~(a ^ b)  -->  a & b  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b)  -->  a | ~b  */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b)  -->  a | b  */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b)  -->  a | ~b  */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))
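
/* This is the usual round-up-to-even idiom: e.g. x = 5 gives
   5 + (5 & 1) = 6 and likewise (5 + 1) & ~1 = 6, but the masked form
   exposes the result to further bitwise simplification.  */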

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
   (bit_ior (convert @4) (convert @5)))))

/* (X | Y) ^ X -> Y & ~X  */
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:c @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))
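
/* E.g. (a | 12) & (a | 10) distributes to a | (12 & 10), which the
   constant folder reduces to a | 8, saving two operations.  */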

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))

(simplify
 (abs (abs@1 @0))
 @1)
(simplify
 (abs (negate @0))
 (abs @0))
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
 (minus @0 @1)
 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))))

/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* -(A - B) -> B - A.  */
(simplify
 (negate (minus @0 @1))
 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))
  (minus @1 @0)))
(simplify
 (negate (pointer_diff @0 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (type))
  (pointer_diff @1 @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
  (plus @0 (negate @1))))

/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constants (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || !type_has_mode_precision_p (type)))
   (convert (bitop @0 (convert @1))))))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (if (!CONSTANT_CLASS_P (@0))
   /* This is the canonical form regardless of whether (bitop @1 @2) can be
      folded to a constant.  */
   (bitop @0 (bitop @1 @2))
   /* In this case we have three constants and (bitop @0 @1) doesn't fold
      to a constant.  This can happen if @0 or @1 is a POLY_INT_CST and if
      the values involved are such that the operation can't be decided at
      compile time.  Try folding one of @0 or @1 with @2 to see whether
      that combination can be decided at compile time.

      Keep the existing form if both folds fail, to avoid endless
      oscillation.  */
   (with { tree cst1 = const_binop (bitop, type, @0, @2); }
    (if (cst1)
     (bitop @1 { cst1; })
     (with { tree cst2 = const_binop (bitop, type, @1, @2); }
      (if (cst2)
       (bitop @0 { cst2; }))))))))
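
/* For example, (x & 0xff) & 0x0f reassociates to x & (0xff & 0x0f)
   and folds to x & 0x0f, and (x | 1) | 2 becomes x | 3.  */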

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
 (truth_not @0))

(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true.  */
(for op (eq ne)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))

/* ~~x -> x */
(simplify
 (bit_not (bit_not @0))
 @0)

/* Convert ~ (-A) to A - 1.  */
(simplify
 (bit_not (convert? (negate @0)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))

/* Convert - (~A) to A + 1.  */
(simplify
 (negate (nop_convert (bit_not @0)))
 (plus (view_convert @0) { build_each_one_cst (type); }))

/* Convert ~ (A - 1) or ~ (A + -1) to -A.  */
(simplify
 (bit_not (convert? (minus @0 integer_each_onep)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))
(simplify
 (bit_not (convert? (plus @0 integer_all_onesp)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))

/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify.  */
(simplify
 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 (bit_not @1)))))
(simplify
 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 @1))))

/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical.  */
(simplify
 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_not (bit_xor (view_convert @0) @1))))

/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))

/* Fold A - (A & B) into ~B & A.  */
(simplify
 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (convert (bit_and (bit_not @1) @0))))

/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0  */
(for cmp (gt lt ge le)
 (simplify
  (mult (convert (cmp @0 @1)) @2)
  (cond (cmp @0 @1) @2 { build_zero_cst (type); })))

/* For integral types with undefined overflow and C != 0 fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && tree_expr_nonzero_p (@1))
   (cmp @0 @2))))

/* For integral types with wrapping overflow and C odd fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && (TREE_INT_CST_LOW (@1) & 1) != 0)
   (cmp @0 @2))))

/* For integral types with undefined overflow and C != 0 fold
   x * C RELOP y * C into:

   x RELOP y for nonnegative C
   y RELOP x for negative C  */
(for cmp (lt gt le ge)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
    (cmp @0 @2)
    (if (TREE_CODE (@1) == INTEGER_CST
         && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
     (cmp @2 @0))))))

/* (X - 1U) <= INT_MAX-1U into (int) X > 0.  */
(for cmp (le gt)
     icmp (gt le)
 (simplify
  (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_PRECISION (TREE_TYPE (@0)) > 1
       && (wi::to_wide (@2)
           == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
   (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
    (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
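
/* For unsigned X, X - 1U <= INT_MAX - 1U holds exactly when X is in
   [1, INT_MAX], which is the same as (int) X > 0, so the subtraction
   disappears entirely.  */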

/* X / 4 < Y / 4 iff X < Y when the division is known to be exact.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
  (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
   (cmp @0 @1))))

/* X / C1 op C2 into a simple range test.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && integer_nonzerop (@1)
       && !TREE_OVERFLOW (@1)
       && !TREE_OVERFLOW (@2))
   (with { tree lo, hi; bool neg_overflow;
           enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
                                                   &neg_overflow); }
    (switch
     (if (code == LT_EXPR || code == GE_EXPR)
      (if (TREE_OVERFLOW (lo))
       { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
       (if (code == LT_EXPR)
        (lt @0 { lo; })
        (ge @0 { lo; }))))
     (if (code == LE_EXPR || code == GT_EXPR)
      (if (TREE_OVERFLOW (hi))
       { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
       (if (code == LE_EXPR)
        (le @0 { hi; })
        (gt @0 { hi; }))))
     (if (!lo && !hi)
      { build_int_cst (type, code == NE_EXPR); })
     (if (code == EQ_EXPR && !hi)
      (ge @0 { lo; }))
     (if (code == EQ_EXPR && !lo)
      (le @0 { hi; }))
     (if (code == NE_EXPR && !hi)
      (lt @0 { lo; }))
     (if (code == NE_EXPR && !lo)
      (gt @0 { hi; }))
     (if (GENERIC)
      { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
                           lo, hi); })
     (with
      {
        tree etype = range_check_type (TREE_TYPE (@0));
        if (etype)
          {
            if (! TYPE_UNSIGNED (etype))
              etype = unsigned_type_for (etype);
            hi = fold_convert (etype, hi);
            lo = fold_convert (etype, lo);
            hi = const_binop (MINUS_EXPR, etype, hi, lo);
          }
      }
      (if (etype && hi && !TREE_OVERFLOW (hi))
       (if (code == EQ_EXPR)
        (le (minus (convert:etype @0) { lo; }) { hi; })
        (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
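
/* E.g. for unsigned X, X / 3 == 2 holds exactly for X in [6, 8], so it
   can become the single unsigned test X - 6 <= 2; likewise X / 3 != 2
   becomes X - 6 > 2.  */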

/* X + Z < Y + Z is the same as X < Y when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (plus:c @0 @2) (plus:c @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (plus:c @0 @2) (plus:c @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @0 @1))))

/* X - Z < Y - Z is the same as X < Y when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (minus @0 @2) (minus @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (minus @0 @2) (minus @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @0 @1))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @0 @1))))
(simplify
 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @0 @1)))

/* Z - X < Z - Y is the same as Y < X when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @1 @0))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @1 @0))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @1 @0))))
(simplify
 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @1 @0)))

/* X + Y < Y is the same as X < 0 when there is no overflow.  */
(for op (lt le gt ge)
 (simplify
  (op:c (plus:c@2 @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@0) || single_use (@2)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
/* For equality, this is also true with wrapping overflow.  */
(for op (eq ne)
 (simplify
  (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
       && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
 (simplify
  (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
  (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))

/* X - Y < X is the same as Y > 0 when there is no overflow.
   For equality, this is also true with wrapping overflow.  */
(for op (simple_comparison)
 (simplify
  (op:c @0 (minus@2 @0 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || ((op == EQ_EXPR || op == NE_EXPR)
               && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
       && (CONSTANT_CLASS_P (@1) || single_use (@2)))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))

/* Transform:
   (X / Y) == 0 -> X < Y if X, Y are unsigned.
   (X / Y) != 0 -> X >= Y, if X, Y are unsigned.  */
(for cmp (eq ne)
     ocmp (lt ge)
 (simplify
  (cmp (trunc_div @0 @1) integer_zerop)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
   (ocmp @0 @1))))

/* X == C - X can never be true if C is odd, since X + X is always
   even while C is not.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
  (if (TREE_INT_CST_LOW (@1) & 1)
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* Arguments on which one can call get_nonzero_bits to get the bits
   possibly set.  */
(match with_possible_nonzero_bits
 INTEGER_CST@0)
(match with_possible_nonzero_bits
 SSA_NAME@0
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
/* Slightly extended version, do not make it recursive to keep it cheap.  */
(match (with_possible_nonzero_bits2 @0)
 with_possible_nonzero_bits@0)
(match (with_possible_nonzero_bits2 @0)
 (bit_and:c with_possible_nonzero_bits@0 @2))

/* Same for bits that are known to be set, but we do not have
   an equivalent to get_nonzero_bits yet.  */
(match (with_certain_nonzero_bits2 @0)
 INTEGER_CST@0)
(match (with_certain_nonzero_bits2 @0)
 (bit_ior @1 INTEGER_CST@0))

/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
  (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* ((X inner_op C0) outer_op C1)
   With X being a tree where value_range has reasoned certain bits to always be
   zero throughout its computed value range,
   inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op,
   where zero_mask has 1's for all bits that are sure to be 0 in X
   and 0's otherwise.
   if (inner_op == '^') C0 &= ~C1;
   if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
   if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))  */
(for inner_op (bit_ior bit_xor)
     outer_op (bit_xor bit_ior)
(simplify
 (outer_op
  (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
 (with
  {
    bool fail = false;
    wide_int zero_mask_not;
    wide_int C0;
    wide_int cst_emit;

    if (TREE_CODE (@2) == SSA_NAME)
      zero_mask_not = get_nonzero_bits (@2);
    else
      fail = true;

    if (inner_op == BIT_XOR_EXPR)
      {
        C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
        cst_emit = C0 | wi::to_wide (@1);
      }
    else
      {
        C0 = wi::to_wide (@0);
        cst_emit = C0 ^ wi::to_wide (@1);
      }
  }
  (if (!fail && (C0 & zero_mask_not) == 0)
   (outer_op @2 { wide_int_to_tree (type, cst_emit); })
   (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
    (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))

/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
(simplify
 (pointer_plus (pointer_plus:s @0 @1) @3)
 (pointer_plus @0 (plus @1 @3)))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
 /* Conditionally look through a sign-changing conversion.  */
 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
      && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
          || (GENERIC && type == TREE_TYPE (@1))))
  @1))
(simplify
 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
  (convert @1)))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler and easier to analyze with respect to alignment
     ... = ptr & ~algn;  */
(simplify
 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
  (bit_and @0 { algn; })))

/* Try folding difference of addresses.  */
(simplify
 (minus (convert ADDR_EXPR@0) (convert @1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (minus (convert @0) (convert ADDR_EXPR@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert?@3 @1))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 @0) (convert?@3 ADDR_EXPR@1))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))

/* If arg0 is derived from the address of an object or function, we may
   be able to fold this expression using the object or function's
   alignment.  */
(simplify
 (bit_and (convert? @0) INTEGER_CST@1)
 (if (POINTER_TYPE_P (TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with
   {
     unsigned int align;
     unsigned HOST_WIDE_INT bitpos;
     get_pointer_alignment_1 (@0, &align, &bitpos);
   }
   (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
    { wide_int_to_tree (type, (wi::to_wide (@1)
                               & (bitpos / BITS_PER_UNIT))); }))))
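
/* E.g. if P is the address of an object known to be 16-byte aligned
   with no byte offset, (uintptr_t) P & 7 folds to the constant 0 here,
   which lets source-level alignment checks be optimized away.  */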
1622
1623
1624 /* We can't reassociate at all for saturating types. */
1625 (if (!TYPE_SATURATING (type))
1626
1627 /* Contract negates. */
1628 /* A + (-B) -> A - B */
1629 (simplify
1630 (plus:c @0 (convert? (negate @1)))
1631 /* Apply STRIP_NOPS on the negate. */
1632 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1633 && !TYPE_OVERFLOW_SANITIZED (type))
1634 (with
1635 {
1636 tree t1 = type;
1637 if (INTEGRAL_TYPE_P (type)
1638 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1639 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1640 }
1641 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
1642 /* A - (-B) -> A + B */
1643 (simplify
1644 (minus @0 (convert? (negate @1)))
1645 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1646 && !TYPE_OVERFLOW_SANITIZED (type))
1647 (with
1648 {
1649 tree t1 = type;
1650 if (INTEGRAL_TYPE_P (type)
1651 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1652 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1653 }
1654 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
1655 /* -(T)(-A) -> (T)A
1656 Sign-extension is ok except for INT_MIN, which thankfully cannot
1657 happen without overflow. */
1658 (simplify
1659 (negate (convert (negate @1)))
1660 (if (INTEGRAL_TYPE_P (type)
1661 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
1662 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
1663 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1664 && !TYPE_OVERFLOW_SANITIZED (type)
1665 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1666 (convert @1)))
1667 (simplify
1668 (negate (convert negate_expr_p@1))
1669 (if (SCALAR_FLOAT_TYPE_P (type)
1670 && ((DECIMAL_FLOAT_TYPE_P (type)
1671 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
1672 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
1673 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
1674 (convert (negate @1))))
1675 (simplify
1676 (negate (nop_convert (negate @1)))
1677 (if (!TYPE_OVERFLOW_SANITIZED (type)
1678 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1679 (view_convert @1)))
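/* C-level illustrations of the negate contractions above (names are only
   examples; signed overflow is assumed undefined, as by default, and not
   sanitized):

     int  f (int a, int b) { return a + (-b); }       ->  a - b
     int  g (int a, int b) { return a - (-b); }       ->  a + b
     long h (int a)        { return -(long) (-a); }   ->  (long) a  */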
1680
1681 /* We can't reassociate floating-point unless -fassociative-math is set,
1682 nor fixed-point plus or minus, because of saturation to +-Inf. */
1683 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1684 && !FIXED_POINT_TYPE_P (type))
1685
1686 /* Match patterns that allow contracting a plus-minus pair
1687 irrespective of overflow issues. */
1688 /* (A +- B) - A -> +- B */
1689 /* (A +- B) -+ B -> A */
1690 /* A - (A +- B) -> -+ B */
1691 /* A +- (B -+ A) -> +- B */
1692 (simplify
1693 (minus (plus:c @0 @1) @0)
1694 @1)
1695 (simplify
1696 (minus (minus @0 @1) @0)
1697 (negate @1))
1698 (simplify
1699 (plus:c (minus @0 @1) @1)
1700 @0)
1701 (simplify
1702 (minus @0 (plus:c @0 @1))
1703 (negate @1))
1704 (simplify
1705 (minus @0 (minus @0 @1))
1706 @1)
1707 /* (A +- B) + (C - A) -> C +- B */
1708 /* (A + B) - (A - C) -> B + C */
1709 /* More cases are handled with comparisons. */
1710 (simplify
1711 (plus:c (plus:c @0 @1) (minus @2 @0))
1712 (plus @2 @1))
1713 (simplify
1714 (plus:c (minus @0 @1) (minus @2 @0))
1715 (minus @2 @1))
1716 (simplify
1717 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
1718 (if (TYPE_OVERFLOW_UNDEFINED (type)
1719 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
1720 (pointer_diff @2 @1)))
1721 (simplify
1722 (minus (plus:c @0 @1) (minus @0 @2))
1723 (plus @1 @2))
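/* Source-level instances of the contractions above (names are only
   examples); none of them need to reason about overflow:

     (a + b) - a        ->  b
     (a - b) + b        ->  a
     a - (a + b)        ->  -b
     (a + b) + (c - a)  ->  c + b
     (a + b) - (a - c)  ->  b + c  */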
1724
1725 /* (A +- CST1) +- CST2 -> A + CST3
1726 Use view_convert because it is safe for vectors and equivalent for
1727 scalars. */
1728 (for outer_op (plus minus)
1729 (for inner_op (plus minus)
1730 neg_inner_op (minus plus)
1731 (simplify
1732 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1733 CONSTANT_CLASS_P@2)
1734 /* If one of the types wraps, use that one. */
1735 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
1736 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
1737 forever if something doesn't simplify into a constant. */
1738 (if (!CONSTANT_CLASS_P (@0))
1739 (if (outer_op == PLUS_EXPR)
1740 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1741 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
1742 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1743 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1744 (if (outer_op == PLUS_EXPR)
1745 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1746 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1747 /* If the constant operation overflows we cannot do the transform
1748 directly as we would introduce undefined overflow, for example
1749 with (a - 1) + INT_MIN. */
1750 (if (types_match (type, @0))
1751 (with { tree cst = const_binop (outer_op == inner_op
1752 ? PLUS_EXPR : MINUS_EXPR,
1753 type, @1, @2); }
1754 (if (cst && !TREE_OVERFLOW (cst))
1755 (inner_op @0 { cst; } )
1756 /* X+INT_MAX+1 is X-INT_MIN. */
1757 (if (INTEGRAL_TYPE_P (type) && cst
1758 && wi::to_wide (cst) == wi::min_value (type))
1759 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
1760 /* Last resort, use some unsigned type. */
1761 (with { tree utype = unsigned_type_for (type); }
1762 (view_convert (inner_op
1763 (view_convert:utype @0)
1764 (view_convert:utype
1765 { drop_tree_overflow (cst); })))))))))))))
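/* Worked instances of the constant re-association above (values are only
   examples):

     (x + 7) - 3       ->  x + 4
     (x - 5) + 2       ->  x - 3
     x + INT_MAX + 1   ->  x - INT_MIN      (the wi::min_value arm)

   and when the combined constant would otherwise overflow in a type with
   undefined overflow, the arithmetic is carried out in the corresponding
   unsigned type instead (the "last resort" arm).  */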
1766
1767 /* (CST1 - A) +- CST2 -> CST3 - A */
1768 (for outer_op (plus minus)
1769 (simplify
1770 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
1771 (with { tree cst = const_binop (outer_op, type, @1, @2); }
1772 (if (cst && !TREE_OVERFLOW (cst))
1773 (minus { cst; } @0)))))
1774
1775 /* CST1 - (CST2 - A) -> CST3 + A */
1776 (simplify
1777 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1778 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1779 (if (cst && !TREE_OVERFLOW (cst))
1780 (plus { cst; } @0))))
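/* E.g. (10 - x) + 3 -> 13 - x and 10 - (3 - x) -> 7 + x, provided the
   folded constant does not overflow.  */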
1781
1782 /* ~A + A -> -1 */
1783 (simplify
1784 (plus:c (bit_not @0) @0)
1785 (if (!TYPE_OVERFLOW_TRAPS (type))
1786 { build_all_ones_cst (type); }))
1787
1788 /* ~A + 1 -> -A */
1789 (simplify
1790 (plus (convert? (bit_not @0)) integer_each_onep)
1791 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1792 (negate (convert @0))))
1793
1794 /* -A - 1 -> ~A */
1795 (simplify
1796 (minus (convert? (negate @0)) integer_each_onep)
1797 (if (!TYPE_OVERFLOW_TRAPS (type)
1798 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1799 (bit_not (convert @0))))
1800
1801 /* -1 - A -> ~A */
1802 (simplify
1803 (minus integer_all_onesp @0)
1804 (bit_not @0))
1805
1806 /* (T)(P + A) - (T)P -> (T) A */
1807 (simplify
1808 (minus (convert (plus:c @@0 @1))
1809 (convert? @0))
1810 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1811 /* For integer types, if A has a smaller type
1812 than T the result depends on the possible
1813 overflow in P + A.
1814 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1815 However, if an overflow in P + A would cause
1816 undefined behavior, we can assume that there
1817 is no overflow. */
1818 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1819 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1820 (convert @1)))
1821 (simplify
1822 (minus (convert (pointer_plus @@0 @1))
1823 (convert @0))
1824 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1825 /* For pointer types, if the conversion of A to the
1826 final type requires a sign- or zero-extension,
1827 then we have to punt - it is not defined which
1828 one is correct. */
1829 || (POINTER_TYPE_P (TREE_TYPE (@0))
1830 && TREE_CODE (@1) == INTEGER_CST
1831 && tree_int_cst_sign_bit (@1) == 0))
1832 (convert @1)))
1833 (simplify
1834 (pointer_diff (pointer_plus @@0 @1) @0)
1835 /* The second argument of pointer_plus must be interpreted as signed, and
1836 thus sign-extended if necessary. */
1837 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
1838 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
1839 second arg is unsigned even when we need to consider it as signed;
1840 we don't want to diagnose overflow here. */
1841 (convert (view_convert:stype @1))))
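/* As an illustration (names are only examples), for

     long f (char *p, unsigned long n) { return (p + n) - p; }

   the pointer_plus offset is reinterpreted as a signed value (and
   sign-extended if narrower), using view_convert rather than convert so
   that no overflow is diagnosed.  */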
1842
1843 /* (T)P - (T)(P + A) -> -(T) A */
1844 (simplify
1845 (minus (convert? @0)
1846 (convert (plus:c @@0 @1)))
1847 (if (INTEGRAL_TYPE_P (type)
1848 && TYPE_OVERFLOW_UNDEFINED (type)
1849 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1850 (with { tree utype = unsigned_type_for (type); }
1851 (convert (negate (convert:utype @1))))
1852 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1853 /* For integer types, if A has a smaller type
1854 than T the result depends on the possible
1855 overflow in P + A.
1856 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1857 However, if an overflow in P + A would cause
1858 undefined behavior, we can assume that there
1859 is no overflow. */
1860 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1861 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1862 (negate (convert @1)))))
1863 (simplify
1864 (minus (convert @0)
1865 (convert (pointer_plus @@0 @1)))
1866 (if (INTEGRAL_TYPE_P (type)
1867 && TYPE_OVERFLOW_UNDEFINED (type)
1868 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1869 (with { tree utype = unsigned_type_for (type); }
1870 (convert (negate (convert:utype @1))))
1871 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1872 /* For pointer types, if the conversion of A to the
1873 final type requires a sign- or zero-extension,
1874 then we have to punt - it is not defined which
1875 one is correct. */
1876 || (POINTER_TYPE_P (TREE_TYPE (@0))
1877 && TREE_CODE (@1) == INTEGER_CST
1878 && tree_int_cst_sign_bit (@1) == 0))
1879 (negate (convert @1)))))
1880 (simplify
1881 (pointer_diff @0 (pointer_plus @@0 @1))
1882 /* The second argument of pointer_plus must be interpreted as signed, and
1883 thus sign-extended if necessary. */
1884 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
1885 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
1886 second arg is unsigned even when we need to consider it as signed;
1887 we don't want to diagnose overflow here. */
1888 (negate (convert (view_convert:stype @1)))))
1889
1890 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
1891 (simplify
1892 (minus (convert (plus:c @@0 @1))
1893 (convert (plus:c @0 @2)))
1894 (if (INTEGRAL_TYPE_P (type)
1895 && TYPE_OVERFLOW_UNDEFINED (type)
1896 && element_precision (type) <= element_precision (TREE_TYPE (@1))
1897 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
1898 (with { tree utype = unsigned_type_for (type); }
1899 (convert (minus (convert:utype @1) (convert:utype @2))))
1900 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
1901 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
1902 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
1903 /* For integer types, if A has a smaller type
1904 than T the result depends on the possible
1905 overflow in P + A.
1906 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1907 However, if an overflow in P + A would cause
1908 undefined behavior, we can assume that there
1909 is no overflow. */
1910 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1911 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
1912 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
1913 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
1914 (minus (convert @1) (convert @2)))))
1915 (simplify
1916 (minus (convert (pointer_plus @@0 @1))
1917 (convert (pointer_plus @0 @2)))
1918 (if (INTEGRAL_TYPE_P (type)
1919 && TYPE_OVERFLOW_UNDEFINED (type)
1920 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1921 (with { tree utype = unsigned_type_for (type); }
1922 (convert (minus (convert:utype @1) (convert:utype @2))))
1923 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1924 /* For pointer types, if the conversion of A to the
1925 final type requires a sign- or zero-extension,
1926 then we have to punt - it is not defined which
1927 one is correct. */
1928 || (POINTER_TYPE_P (TREE_TYPE (@0))
1929 && TREE_CODE (@1) == INTEGER_CST
1930 && tree_int_cst_sign_bit (@1) == 0
1931 && TREE_CODE (@2) == INTEGER_CST
1932 && tree_int_cst_sign_bit (@2) == 0))
1933 (minus (convert @1) (convert @2)))))
1934 (simplify
1935 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
1936 /* The second argument of pointer_plus must be interpreted as signed, and
1937 thus sign-extended if necessary. */
1938 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
1939 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
1940 second arg is unsigned even when we need to consider it as signed;
1941 we don't want to diagnose overflow here. */
1942 (minus (convert (view_convert:stype @1))
1943 (convert (view_convert:stype @2)))))))
1944
1945 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
1946 Modeled after fold_plusminus_mult_expr. */
1947 (if (!TYPE_SATURATING (type)
1948 && (!FLOAT_TYPE_P (type) || flag_associative_math))
1949 (for plusminus (plus minus)
1950 (simplify
1951 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
1952 (if ((!ANY_INTEGRAL_TYPE_P (type)
1953 || TYPE_OVERFLOW_WRAPS (type)
1954 || (INTEGRAL_TYPE_P (type)
1955 && tree_expr_nonzero_p (@0)
1956 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
1957 /* If @1 +- @2 is constant require a hard single-use on either
1958 original operand (but not on both). */
1959 && (single_use (@3) || single_use (@4)))
1960 (mult (plusminus @1 @2) @0)))
1961 /* We cannot generate constant 1 for fract. */
1962 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
1963 (simplify
1964 (plusminus @0 (mult:c@3 @0 @2))
1965 (if ((!ANY_INTEGRAL_TYPE_P (type)
1966 || TYPE_OVERFLOW_WRAPS (type)
1967 || (INTEGRAL_TYPE_P (type)
1968 && tree_expr_nonzero_p (@0)
1969 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
1970 && single_use (@3))
1971 (mult (plusminus { build_one_cst (type); } @2) @0)))
1972 (simplify
1973 (plusminus (mult:c@3 @0 @2) @0)
1974 (if ((!ANY_INTEGRAL_TYPE_P (type)
1975 || TYPE_OVERFLOW_WRAPS (type)
1976 || (INTEGRAL_TYPE_P (type)
1977 && tree_expr_nonzero_p (@0)
1978 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
1979 && single_use (@3))
1980 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
1981
1982 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
1983
1984 (for minmax (min max FMIN_ALL FMAX_ALL)
1985 (simplify
1986 (minmax @0 @0)
1987 @0))
1988 /* min(max(x,y),y) -> y. */
1989 (simplify
1990 (min:c (max:c @0 @1) @1)
1991 @1)
1992 /* max(min(x,y),y) -> y. */
1993 (simplify
1994 (max:c (min:c @0 @1) @1)
1995 @1)
1996 /* max(a,-a) -> abs(a). */
1997 (simplify
1998 (max:c @0 (negate @0))
1999 (if (TREE_CODE (type) != COMPLEX_TYPE
2000 && (! ANY_INTEGRAL_TYPE_P (type)
2001 || TYPE_OVERFLOW_UNDEFINED (type)))
2002 (abs @0)))
2003 /* min(a,-a) -> -abs(a). */
2004 (simplify
2005 (min:c @0 (negate @0))
2006 (if (TREE_CODE (type) != COMPLEX_TYPE
2007 && (! ANY_INTEGRAL_TYPE_P (type)
2008 || TYPE_OVERFLOW_UNDEFINED (type)))
2009 (negate (abs @0))))
2010 (simplify
2011 (min @0 @1)
2012 (switch
2013 (if (INTEGRAL_TYPE_P (type)
2014 && TYPE_MIN_VALUE (type)
2015 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2016 @1)
2017 (if (INTEGRAL_TYPE_P (type)
2018 && TYPE_MAX_VALUE (type)
2019 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2020 @0)))
2021 (simplify
2022 (max @0 @1)
2023 (switch
2024 (if (INTEGRAL_TYPE_P (type)
2025 && TYPE_MAX_VALUE (type)
2026 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2027 @1)
2028 (if (INTEGRAL_TYPE_P (type)
2029 && TYPE_MIN_VALUE (type)
2030 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2031 @0)))
2032
2033 /* max (a, a + CST) -> a + CST where CST is positive. */
2034 /* max (a, a + CST) -> a where CST is negative. */
2035 (simplify
2036 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2037 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2038 (if (tree_int_cst_sgn (@1) > 0)
2039 @2
2040 @0)))
2041
2042 /* min (a, a + CST) -> a where CST is positive. */
2043 /* min (a, a + CST) -> a + CST where CST is negative. */
2044 (simplify
2045 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2046 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2047 (if (tree_int_cst_sgn (@1) > 0)
2048 @0
2049 @2)))
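/* For example, with signed overflow undefined (names are only examples):

     int fmax (int a) { return a > a + 4 ? a : a + 4; }   ->  a + 4
     int fmin (int a) { return a < a - 4 ? a : a - 4; }   ->  a - 4

   because a + 4 > a and a - 4 < a can then be assumed not to wrap.  */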
2050
2051 /* (convert (minmax (convert x) c)) -> (minmax x c) if x is promoted
2052 and the outer convert demotes the expression back to x's type. */
2053 (for minmax (min max)
2054 (simplify
2055 (convert (minmax@0 (convert @1) INTEGER_CST@2))
2056 (if (INTEGRAL_TYPE_P (type)
2057 && types_match (@1, type) && int_fits_type_p (@2, type)
2058 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2059 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2060 (minmax @1 (convert @2)))))
2061
2062 (for minmax (FMIN_ALL FMAX_ALL)
2063 /* If either argument is NaN, return the other one. Avoid the
2064 transformation if we get (and honor) a signalling NaN. */
2065 (simplify
2066 (minmax:c @0 REAL_CST@1)
2067 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2068 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2069 @0)))
2070 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2071 functions to return the numeric arg if the other one is NaN.
2072 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2073 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2074 worry about it either. */
2075 (if (flag_finite_math_only)
2076 (simplify
2077 (FMIN_ALL @0 @1)
2078 (min @0 @1))
2079 (simplify
2080 (FMAX_ALL @0 @1)
2081 (max @0 @1)))
2082 /* min (-A, -B) -> -max (A, B) */
2083 (for minmax (min max FMIN_ALL FMAX_ALL)
2084 maxmin (max min FMAX_ALL FMIN_ALL)
2085 (simplify
2086 (minmax (negate:s@2 @0) (negate:s@3 @1))
2087 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2088 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2089 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2090 (negate (maxmin @0 @1)))))
2091 /* MIN (~X, ~Y) -> ~MAX (X, Y)
2092 MAX (~X, ~Y) -> ~MIN (X, Y) */
2093 (for minmax (min max)
2094 maxmin (max min)
2095 (simplify
2096 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2097 (bit_not (maxmin @0 @1))))
2098
2099 /* MIN (X, Y) == X -> X <= Y */
2100 (for minmax (min min max max)
2101 cmp (eq ne eq ne )
2102 out (le gt ge lt )
2103 (simplify
2104 (cmp:c (minmax:c @0 @1) @0)
2105 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2106 (out @0 @1))))
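/* E.g. (x < y ? x : y) == x  ->  x <= y  and
        (x > y ? x : y) != x  ->  x < y,
   once the ternaries have been recognized as MIN_EXPR/MAX_EXPR.  */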
2107 /* MIN (X, 5) == 0 -> X == 0
2108 MIN (X, 5) == 7 -> false */
2109 (for cmp (eq ne)
2110 (simplify
2111 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2112 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2113 TYPE_SIGN (TREE_TYPE (@0))))
2114 { constant_boolean_node (cmp == NE_EXPR, type); }
2115 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2116 TYPE_SIGN (TREE_TYPE (@0))))
2117 (cmp @0 @2)))))
2118 (for cmp (eq ne)
2119 (simplify
2120 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2121 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2122 TYPE_SIGN (TREE_TYPE (@0))))
2123 { constant_boolean_node (cmp == NE_EXPR, type); }
2124 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2125 TYPE_SIGN (TREE_TYPE (@0))))
2126 (cmp @0 @2)))))
2127 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2128 (for minmax (min min max max min min max max )
2129 cmp (lt le gt ge gt ge lt le )
2130 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2131 (simplify
2132 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2133 (comb (cmp @0 @2) (cmp @1 @2))))
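/* Worked instance: MIN (x, 3) < 5 -> x < 5 || 3 < 5, which further folds
   to true, while MAX (x, 3) < 5 -> x < 5 && 3 < 5, i.e. x < 5; the
   bit_ior/bit_and column picks the right combiner for MIN vs. MAX.  */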
2134
2135 /* Simplifications of shift and rotates. */
2136
2137 (for rotate (lrotate rrotate)
2138 (simplify
2139 (rotate integer_all_onesp@0 @1)
2140 @0))
2141
2142 /* Optimize -1 >> x for arithmetic right shifts. */
2143 (simplify
2144 (rshift integer_all_onesp@0 @1)
2145 (if (!TYPE_UNSIGNED (type)
2146 && tree_expr_nonnegative_p (@1))
2147 @0))
2148
2149 /* Optimize (x >> c) << c into x & (-1<<c). */
2150 (simplify
2151 (lshift (rshift @0 INTEGER_CST@1) @1)
2152 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2153 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
2154
2155 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2156 types. */
2157 (simplify
2158 (rshift (lshift @0 INTEGER_CST@1) @1)
2159 (if (TYPE_UNSIGNED (type)
2160 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2161 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
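/* E.g. for unsigned 32-bit x (values are only examples):

     (x >> 4) << 4  ->  x & 0xfffffff0      (clear the low four bits)
     (x << 4) >> 4  ->  x & 0x0fffffff      (clear the high four bits)  */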
2162
2163 (for shiftrotate (lrotate rrotate lshift rshift)
2164 (simplify
2165 (shiftrotate @0 integer_zerop)
2166 (non_lvalue @0))
2167 (simplify
2168 (shiftrotate integer_zerop@0 @1)
2169 @0)
2170 /* Prefer vector1 << scalar to vector1 << vector2
2171 if vector2 is uniform. */
2172 (for vec (VECTOR_CST CONSTRUCTOR)
2173 (simplify
2174 (shiftrotate @0 vec@1)
2175 (with { tree tem = uniform_vector_p (@1); }
2176 (if (tem)
2177 (shiftrotate @0 { tem; }))))))
2178
2179 /* Simplify X << Y to X when the low bits of Y that could encode a valid
2180 shift amount are known to be 0, as the only valid Y is then 0. Similarly for X >> Y. */
2181 #if GIMPLE
2182 (for shift (lshift rshift)
2183 (simplify
2184 (shift @0 SSA_NAME@1)
2185 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2186 (with {
2187 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2188 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2189 }
2190 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2191 @0)))))
2192 #endif
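/* For instance, for 32-bit X the test uses width = ceil_log2 (32) = 5; if
   get_nonzero_bits shows that the low 5 bits of Y are zero, the only
   shift count in range is Y == 0, so X << Y (or X >> Y) is just X.  */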
2193
2194 /* Rewrite an LROTATE_EXPR by a constant into an
2195 RROTATE_EXPR by a new constant. */
2196 (simplify
2197 (lrotate @0 INTEGER_CST@1)
2198 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2199 build_int_cst (TREE_TYPE (@1),
2200 element_precision (type)), @1); }))
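/* E.g. in a 32-bit type a rotate left by 5 becomes a rotate right by
   32 - 5 = 27.  */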
2201
2202 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2203 (for op (lrotate rrotate rshift lshift)
2204 (simplify
2205 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2206 (with { unsigned int prec = element_precision (type); }
2207 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2208 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2209 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2210 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2211 (with { unsigned int low = (tree_to_uhwi (@1)
2212 + tree_to_uhwi (@2)); }
2213 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2214 being well defined. */
2215 (if (low >= prec)
2216 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2217 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2218 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2219 { build_zero_cst (type); }
2220 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2221 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
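/* Worked instances in a 32-bit type (values are only examples):

     (x << 3) << 4    ->  x << 7
     (x >> 30) >> 10  ->  0 for unsigned x, x >> 31 for signed x
                          (the combined count 40 is out of range)
     lrotate (lrotate (x, 20), 20)  ->  lrotate (x, 8)   (40 % 32)  */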
2222
2223
2224 /* ((1 << A) & 1) != 0 -> A == 0
2225 ((1 << A) & 1) == 0 -> A != 0 */
2226 (for cmp (ne eq)
2227 icmp (eq ne)
2228 (simplify
2229 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2230 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2231
2232 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2233 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2234 if CST2 != 0. */
2235 (for cmp (ne eq)
2236 (simplify
2237 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2238 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2239 (if (cand < 0
2240 || (!integer_zerop (@2)
2241 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2242 { constant_boolean_node (cmp == NE_EXPR, type); }
2243 (if (!integer_zerop (@2)
2244 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2245 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
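/* E.g. (4 << A) == 32 -> A == ctz (32) - ctz (4) = 5 - 2 = 3, while
   (4 << A) == 33 folds to false since no left shift of 4 yields 33.  */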
2246
2247 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2248 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2249 if the new mask might be further optimized. */
2250 (for shift (lshift rshift)
2251 (simplify
2252 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2253 INTEGER_CST@2)
2254 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2255 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2256 && tree_fits_uhwi_p (@1)
2257 && tree_to_uhwi (@1) > 0
2258 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2259 (with
2260 {
2261 unsigned int shiftc = tree_to_uhwi (@1);
2262 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2263 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2264 tree shift_type = TREE_TYPE (@3);
2265 unsigned int prec;
2266
2267 if (shift == LSHIFT_EXPR)
2268 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2269 else if (shift == RSHIFT_EXPR
2270 && type_has_mode_precision_p (shift_type))
2271 {
2272 prec = TYPE_PRECISION (TREE_TYPE (@3));
2273 tree arg00 = @0;
2274 /* See if more bits can be proven as zero because of
2275 zero extension. */
2276 if (@3 != @0
2277 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2278 {
2279 tree inner_type = TREE_TYPE (@0);
2280 if (type_has_mode_precision_p (inner_type)
2281 && TYPE_PRECISION (inner_type) < prec)
2282 {
2283 prec = TYPE_PRECISION (inner_type);
2284 /* See if we can shorten the right shift. */
2285 if (shiftc < prec)
2286 shift_type = inner_type;
2287 /* Otherwise X >> C1 is all zeros, so we'll optimize
2288 it into (X, 0) later on by making sure zerobits
2289 is all ones. */
2290 }
2291 }
2292 zerobits = HOST_WIDE_INT_M1U;
2293 if (shiftc < prec)
2294 {
2295 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2296 zerobits <<= prec - shiftc;
2297 }
2298 /* For an arithmetic shift, if the sign bit could be set, zerobits
2299 can actually contain sign bits, so no transformation is
2300 possible, unless MASK masks them all away. In that
2301 case the shift needs to be converted into a logical shift. */
2302 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2303 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2304 {
2305 if ((mask & zerobits) == 0)
2306 shift_type = unsigned_type_for (TREE_TYPE (@3));
2307 else
2308 zerobits = 0;
2309 }
2310 }
2311 }
2312 /* ((X << 16) & 0xff00) is (X, 0). */
2313 (if ((mask & zerobits) == mask)
2314 { build_int_cst (type, 0); }
2315 (with { newmask = mask | zerobits; }
2316 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2317 (with
2318 {
2319 /* Only do the transformation if NEWMASK is some integer
2320 mode's mask. */
2321 for (prec = BITS_PER_UNIT;
2322 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
2323 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
2324 break;
2325 }
2326 (if (prec < HOST_BITS_PER_WIDE_INT
2327 || newmask == HOST_WIDE_INT_M1U)
2328 (with
2329 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2330 (if (!tree_int_cst_equal (newmaskt, @2))
2331 (if (shift_type != TREE_TYPE (@3))
2332 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2333 (bit_and @4 { newmaskt; })))))))))))))
2334
2335 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2336 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
2337 (for shift (lshift rshift)
2338 (for bit_op (bit_and bit_xor bit_ior)
2339 (simplify
2340 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2341 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2342 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2343 (bit_op (shift (convert @0) @1) { mask; }))))))
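/* E.g. for unsigned 32-bit x (values are only examples):

     (x & 0xff) << 8  ->  (x << 8) & 0xff00
     (x | 0xf0) >> 4  ->  (x >> 4) | 0x0f  */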
2344
2345 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2346 (simplify
2347 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2348 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2349 && (element_precision (TREE_TYPE (@0))
2350 <= element_precision (TREE_TYPE (@1))
2351 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2352 (with
2353 { tree shift_type = TREE_TYPE (@0); }
2354 (convert (rshift (convert:shift_type @1) @2)))))
2355
2356 /* ~(~X >>r Y) -> X >>r Y
2357 ~(~X <<r Y) -> X <<r Y */
2358 (for rotate (lrotate rrotate)
2359 (simplify
2360 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2361 (if ((element_precision (TREE_TYPE (@0))
2362 <= element_precision (TREE_TYPE (@1))
2363 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2364 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2365 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2366 (with
2367 { tree rotate_type = TREE_TYPE (@0); }
2368 (convert (rotate (convert:rotate_type @1) @2))))))
2369
2370 /* Simplifications of conversions. */
2371
2372 /* Basic strip-useless-type-conversions / strip_nops. */
2373 (for cvt (convert view_convert float fix_trunc)
2374 (simplify
2375 (cvt @0)
2376 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2377 || (GENERIC && type == TREE_TYPE (@0)))
2378 @0)))
2379
2380 /* Contract view-conversions. */
2381 (simplify
2382 (view_convert (view_convert @0))
2383 (view_convert @0))
2384
2385 /* For integral conversions with the same precision or pointer
2386 conversions use a NOP_EXPR instead. */
2387 (simplify
2388 (view_convert @0)
2389 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2390 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2391 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2392 (convert @0)))
2393
2394 /* Strip inner integral conversions that do not change precision or size, or
2395 zero-extend while keeping the same size (for bool-to-char). */
2396 (simplify
2397 (view_convert (convert@0 @1))
2398 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2399 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2400 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2401 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2402 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2403 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
2404 (view_convert @1)))
2405
2406 /* Re-association barriers around constants and other re-association
2407 barriers can be removed. */
2408 (simplify
2409 (paren CONSTANT_CLASS_P@0)
2410 @0)
2411 (simplify
2412 (paren (paren@1 @0))
2413 @1)
2414
2415 /* Handle cases of two conversions in a row. */
2416 (for ocvt (convert float fix_trunc)
2417 (for icvt (convert float)
2418 (simplify
2419 (ocvt (icvt@1 @0))
2420 (with
2421 {
2422 tree inside_type = TREE_TYPE (@0);
2423 tree inter_type = TREE_TYPE (@1);
2424 int inside_int = INTEGRAL_TYPE_P (inside_type);
2425 int inside_ptr = POINTER_TYPE_P (inside_type);
2426 int inside_float = FLOAT_TYPE_P (inside_type);
2427 int inside_vec = VECTOR_TYPE_P (inside_type);
2428 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2429 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2430 int inter_int = INTEGRAL_TYPE_P (inter_type);
2431 int inter_ptr = POINTER_TYPE_P (inter_type);
2432 int inter_float = FLOAT_TYPE_P (inter_type);
2433 int inter_vec = VECTOR_TYPE_P (inter_type);
2434 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2435 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2436 int final_int = INTEGRAL_TYPE_P (type);
2437 int final_ptr = POINTER_TYPE_P (type);
2438 int final_float = FLOAT_TYPE_P (type);
2439 int final_vec = VECTOR_TYPE_P (type);
2440 unsigned int final_prec = TYPE_PRECISION (type);
2441 int final_unsignedp = TYPE_UNSIGNED (type);
2442 }
2443 (switch
2444 /* In addition to the cases of two conversions in a row
2445 handled below, if we are converting something to its own
2446 type via an object of identical or wider precision, neither
2447 conversion is needed. */
2448 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2449 || (GENERIC
2450 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2451 && (((inter_int || inter_ptr) && final_int)
2452 || (inter_float && final_float))
2453 && inter_prec >= final_prec)
2454 (ocvt @0))
2455
2456 /* Likewise, if the intermediate and initial types are either both
2457 float or both integer, we don't need the middle conversion if the
2458 former is wider than the latter and doesn't change the signedness
2459 (for integers). Avoid this if the final type is a pointer since
2460 then we sometimes need the middle conversion. */
2461 (if (((inter_int && inside_int) || (inter_float && inside_float))
2462 && (final_int || final_float)
2463 && inter_prec >= inside_prec
2464 && (inter_float || inter_unsignedp == inside_unsignedp))
2465 (ocvt @0))
2466
2467 /* If we have a sign-extension of a zero-extended value, we can
2468 replace that by a single zero-extension. Likewise if the
2469 final conversion does not change precision we can drop the
2470 intermediate conversion. */
2471 (if (inside_int && inter_int && final_int
2472 && ((inside_prec < inter_prec && inter_prec < final_prec
2473 && inside_unsignedp && !inter_unsignedp)
2474 || final_prec == inter_prec))
2475 (ocvt @0))
2476
2477 /* Two conversions in a row are not needed unless:
2478 - some conversion is floating-point (overstrict for now), or
2479 - some conversion is a vector (overstrict for now), or
2480 - the intermediate type is narrower than both initial and
2481 final, or
2482 - the intermediate type and innermost type differ in signedness,
2483 and the outermost type is wider than the intermediate, or
2484 - the initial type is a pointer type and the precisions of the
2485 intermediate and final types differ, or
2486 - the final type is a pointer type and the precisions of the
2487 initial and intermediate types differ. */
2488 (if (! inside_float && ! inter_float && ! final_float
2489 && ! inside_vec && ! inter_vec && ! final_vec
2490 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2491 && ! (inside_int && inter_int
2492 && inter_unsignedp != inside_unsignedp
2493 && inter_prec < final_prec)
2494 && ((inter_unsignedp && inter_prec > inside_prec)
2495 == (final_unsignedp && final_prec > inter_prec))
2496 && ! (inside_ptr && inter_prec != final_prec)
2497 && ! (final_ptr && inside_prec != inter_prec))
2498 (ocvt @0))
2499
2500 /* A truncation to an unsigned type (a zero-extension) should be
2501 canonicalized as bitwise and of a mask. */
2502 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2503 && final_int && inter_int && inside_int
2504 && final_prec == inside_prec
2505 && final_prec > inter_prec
2506 && inter_unsignedp)
2507 (convert (bit_and @0 { wide_int_to_tree
2508 (inside_type,
2509 wi::mask (inter_prec, false,
2510 TYPE_PRECISION (inside_type))); })))
2511
2512 /* If we are converting an integer to a floating-point that can
2513 represent it exactly and back to an integer, we can skip the
2514 floating-point conversion. */
2515 (if (GIMPLE /* PR66211 */
2516 && inside_int && inter_float && final_int &&
2517 (unsigned) significand_size (TYPE_MODE (inter_type))
2518 >= inside_prec - !inside_unsignedp)
2519 (convert @0)))))))
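/* Source-level instances of the rules above (types are only examples,
   assuming 32-bit int, 64-bit long and IEEE double):

     (int) (long) i    ->  i     for int i: the widening middle step is useless
     (int) (double) i  ->  i     since double represents every 32-bit int
                                 exactly (GIMPLE only, see PR66211)  */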
2520
2521 /* If we have a narrowing conversion to an integral type that is fed by a
2522 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2523 masks off bits outside the final type (and nothing else). */
2524 (simplify
2525 (convert (bit_and @0 INTEGER_CST@1))
2526 (if (INTEGRAL_TYPE_P (type)
2527 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2528 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2529 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2530 TYPE_PRECISION (type)), 0))
2531 (convert @0)))
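/* E.g. (unsigned char) (x & 0xff) -> (unsigned char) x for a wider
   unsigned x, since the mask only clears bits that the narrowing
   conversion discards anyway.  */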
2532
2533
2534 /* (X /[ex] A) * A -> X. */
2535 (simplify
2536 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2537 (convert @0))
2538
2539 /* Canonicalization of binary operations. */
2540
2541 /* Convert X + -C into X - C. */
2542 (simplify
2543 (plus @0 REAL_CST@1)
2544 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2545 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
2546 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2547 (minus @0 { tem; })))))
2548
2549 /* Convert x+x into x*2. */
2550 (simplify
2551 (plus @0 @0)
2552 (if (SCALAR_FLOAT_TYPE_P (type))
2553 (mult @0 { build_real (type, dconst2); })
2554 (if (INTEGRAL_TYPE_P (type))
2555 (mult @0 { build_int_cst (type, 2); }))))
2556
2557 /* 0 - X -> -X. */
2558 (simplify
2559 (minus integer_zerop @1)
2560 (negate @1))
2561 (simplify
2562 (pointer_diff integer_zerop @1)
2563 (negate (convert @1)))
2564
2565 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2566 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2567 (-ARG1 + ARG0) reduces to -ARG1. */
2568 (simplify
2569 (minus real_zerop@0 @1)
2570 (if (fold_real_zero_addition_p (type, @0, 0))
2571 (negate @1)))
2572
2573 /* Transform x * -1 into -x. */
2574 (simplify
2575 (mult @0 integer_minus_onep)
2576 (negate @0))
2577
2578 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2579 signed overflow for CST != 0 && CST != -1. */
2580 (simplify
2581 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
2582 (if (TREE_CODE (@2) != INTEGER_CST
2583 && single_use (@3)
2584 && !integer_zerop (@1) && !integer_minus_onep (@1))
2585 (mult (mult @0 @2) @1)))
2586
2587 /* True if we can easily extract the real and imaginary parts of a complex
2588 number. */
2589 (match compositional_complex
2590 (convert? (complex @0 @1)))
2591
2592 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2593 (simplify
2594 (complex (realpart @0) (imagpart @0))
2595 @0)
2596 (simplify
2597 (realpart (complex @0 @1))
2598 @0)
2599 (simplify
2600 (imagpart (complex @0 @1))
2601 @1)
2602
2603 /* Sometimes we only care about half of a complex expression. */
2604 (simplify
2605 (realpart (convert?:s (conj:s @0)))
2606 (convert (realpart @0)))
2607 (simplify
2608 (imagpart (convert?:s (conj:s @0)))
2609 (convert (negate (imagpart @0))))
2610 (for part (realpart imagpart)
2611 (for op (plus minus)
2612 (simplify
2613 (part (convert?:s@2 (op:s @0 @1)))
2614 (convert (op (part @0) (part @1))))))
2615 (simplify
2616 (realpart (convert?:s (CEXPI:s @0)))
2617 (convert (COS @0)))
2618 (simplify
2619 (imagpart (convert?:s (CEXPI:s @0)))
2620 (convert (SIN @0)))
2621
2622 /* conj(conj(x)) -> x */
2623 (simplify
2624 (conj (convert? (conj @0)))
2625 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2626 (convert @0)))
2627
2628 /* conj({x,y}) -> {x,-y} */
2629 (simplify
2630 (conj (convert?:s (complex:s @0 @1)))
2631 (with { tree itype = TREE_TYPE (type); }
2632 (complex (convert:itype @0) (negate (convert:itype @1)))))
2633
2634 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2635 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2636 (simplify
2637 (bswap (bswap @0))
2638 @0)
2639 (simplify
2640 (bswap (bit_not (bswap @0)))
2641 (bit_not @0))
2642 (for bitop (bit_xor bit_ior bit_and)
2643 (simplify
2644 (bswap (bitop:c (bswap @0) @1))
2645 (bitop @0 (bswap @1)))))
2646
2647
2648 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
2649
2650 /* Simplify constant conditions.
2651 Only optimize constant conditions when the selected branch
2652 has the same type as the COND_EXPR. This avoids optimizing
2653 away "c ? x : throw", where the throw has a void type.
2654 Note that we cannot throw away the fold-const.c variant nor
2655 this one as we depend on doing this transform before possibly
2656 A ? B : B -> B triggers and the fold-const.c one can optimize
2657 0 ? A : B to B even if A has side-effects, something
2658 genmatch cannot handle. */
2659 (simplify
2660 (cond INTEGER_CST@0 @1 @2)
2661 (if (integer_zerop (@0))
2662 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2663 @2)
2664 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2665 @1)))
2666 (simplify
2667 (vec_cond VECTOR_CST@0 @1 @2)
2668 (if (integer_all_onesp (@0))
2669 @1
2670 (if (integer_zerop (@0))
2671 @2)))
2672
2673 /* Simplification moved from fold_cond_expr_with_comparison. It may also
2674 be extended. */
2675 /* This pattern implements two kinds of simplification:
2676
2677 Case 1)
2678 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
2679 1) Conversions are type widening from a smaller type.
2680 2) Const c1 equals c2 after canonicalizing the comparison.
2681 3) Comparison has tree code LT, LE, GT or GE.
2682 This specific pattern is needed when (cmp (convert x) c) may not
2683 be simplified by comparison patterns because of multiple uses of
2684 x. It also makes sense here because simplifying across a multiply-
2685 referenced variable is always beneficial for complicated cases.
2686
2687 Case 2)
2688 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2689 (for cmp (lt le gt ge eq)
2690 (simplify
2691 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2692 (with
2693 {
2694 tree from_type = TREE_TYPE (@1);
2695 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2696 enum tree_code code = ERROR_MARK;
2697
2698 if (INTEGRAL_TYPE_P (from_type)
2699 && int_fits_type_p (@2, from_type)
2700 && (types_match (c1_type, from_type)
2701 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2702 && (TYPE_UNSIGNED (from_type)
2703 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2704 && (types_match (c2_type, from_type)
2705 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2706 && (TYPE_UNSIGNED (from_type)
2707 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2708 {
2709 if (cmp != EQ_EXPR)
2710 {
2711 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2712 {
2713 /* X <= Y - 1 is equivalent to X < Y. */
2714 if (cmp == LE_EXPR)
2715 code = LT_EXPR;
2716 /* X > Y - 1 is equivalent to X >= Y. */
2717 if (cmp == GT_EXPR)
2718 code = GE_EXPR;
2719 }
2720 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2721 {
2722 /* X < Y + 1 is equivalent to X <= Y. */
2723 if (cmp == LT_EXPR)
2724 code = LE_EXPR;
2725 /* X >= Y + 1 is equivalent to X > Y. */
2726 if (cmp == GE_EXPR)
2727 code = GT_EXPR;
2728 }
2729 if (code != ERROR_MARK
2730 || wi::to_widest (@2) == wi::to_widest (@3))
2731 {
2732 if (cmp == LT_EXPR || cmp == LE_EXPR)
2733 code = MIN_EXPR;
2734 if (cmp == GT_EXPR || cmp == GE_EXPR)
2735 code = MAX_EXPR;
2736 }
2737 }
2738 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
2739 else if (int_fits_type_p (@3, from_type))
2740 code = EQ_EXPR;
2741 }
2742 }
2743 (if (code == MAX_EXPR)
2744 (convert (max @1 (convert @2)))
2745 (if (code == MIN_EXPR)
2746 (convert (min @1 (convert @2)))
2747 (if (code == EQ_EXPR)
2748 (convert (cond (eq @1 (convert @3))
2749 (convert:from_type @3) (convert:from_type @2)))))))))
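/* Illustration of Case 1 (types and constants are only examples): for
   unsigned char x,

     (int) x < 200 ? (int) x : 200   ->   (int) MIN (x, 200)

   because 200 fits the narrower type, both conversions widen, and the
   comparison code LT selects MIN_EXPR.  */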
2750
2751 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2752
2753 1) OP is PLUS or MINUS.
2754 2) CMP is LT, LE, GT or GE.
2755 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2756
2757 This pattern also handles special cases like:
2758
2759 A) Operand x is an unsigned to signed type conversion and c1 is
2760 integer zero. In this case,
2761 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2762 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2763 B) Const c1 may not equal (C3 op' C2). In this case we also
2764 check equality for (c1+1) and (c1-1) by adjusting comparison
2765 code.
2766
2767 TODO: Though signed type is handled by this pattern, it cannot be
2768 simplified at the moment because the C standard requires additional
2769 type promotion. In order to match&simplify it here, the IR needs
2770 to be cleaned up by other optimizers, i.e. VRP. */
2771 (for op (plus minus)
2772 (for cmp (lt le gt ge)
2773 (simplify
2774 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2775 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2776 (if (types_match (from_type, to_type)
2777 /* Check if it is special case A). */
2778 || (TYPE_UNSIGNED (from_type)
2779 && !TYPE_UNSIGNED (to_type)
2780 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2781 && integer_zerop (@1)
2782 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2783 (with
2784 {
2785 bool overflow = false;
2786 enum tree_code code, cmp_code = cmp;
2787 wide_int real_c1;
2788 wide_int c1 = wi::to_wide (@1);
2789 wide_int c2 = wi::to_wide (@2);
2790 wide_int c3 = wi::to_wide (@3);
2791 signop sgn = TYPE_SIGN (from_type);
2792
2793 /* Handle special case A), given x of unsigned type:
2794 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2795 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2796 if (!types_match (from_type, to_type))
2797 {
2798 if (cmp_code == LT_EXPR)
2799 cmp_code = GT_EXPR;
2800 if (cmp_code == GE_EXPR)
2801 cmp_code = LE_EXPR;
2802 c1 = wi::max_value (to_type);
2803 }
2804 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2805 compute (c3 op' c2) and check if it equals c1, with op' being
2806 the inverted operator of op. Make sure overflow doesn't happen
2807 if it is undefined. */
2808 if (op == PLUS_EXPR)
2809 real_c1 = wi::sub (c3, c2, sgn, &overflow);
2810 else
2811 real_c1 = wi::add (c3, c2, sgn, &overflow);
2812
2813 code = cmp_code;
2814 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
2815 {
2816 /* Check if c1 equals real_c1. The boundary condition is handled
2817 by adjusting the comparison operation if necessary. */
2818 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
2819 && !overflow)
2820 {
2821 /* X <= Y - 1 is equivalent to X < Y. */
2822 if (cmp_code == LE_EXPR)
2823 code = LT_EXPR;
2824 /* X > Y - 1 is equivalent to X >= Y. */
2825 if (cmp_code == GT_EXPR)
2826 code = GE_EXPR;
2827 }
2828 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
2829 && !overflow)
2830 {
2831 /* X < Y + 1 is equivalent to X <= Y. */
2832 if (cmp_code == LT_EXPR)
2833 code = LE_EXPR;
2834 /* X >= Y + 1 is equivalent to X > Y. */
2835 if (cmp_code == GE_EXPR)
2836 code = GT_EXPR;
2837 }
2838 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
2839 {
2840 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
2841 code = MIN_EXPR;
2842 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
2843 code = MAX_EXPR;
2844 }
2845 }
2846 }
2847 (if (code == MAX_EXPR)
2848 (op (max @X { wide_int_to_tree (from_type, real_c1); })
2849 { wide_int_to_tree (from_type, c2); })
2850 (if (code == MIN_EXPR)
2851 (op (min @X { wide_int_to_tree (from_type, real_c1); })
2852 { wide_int_to_tree (from_type, c2); })))))))))
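/* Illustration (values are only examples): for int x,

     x > 100 ? x + 5 : 105   ->   MAX (x, 100) + 5

   since c3 (105) equals c1 + c2; special case A) additionally covers
   tests spelled "(int) x < 0" on an unsigned x.  */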
2853
2854 (for cnd (cond vec_cond)
2855 /* A ? B : (A ? X : C) -> A ? B : C. */
2856 (simplify
2857 (cnd @0 (cnd @0 @1 @2) @3)
2858 (cnd @0 @1 @3))
2859 (simplify
2860 (cnd @0 @1 (cnd @0 @2 @3))
2861 (cnd @0 @1 @3))
2862 /* A ? B : (!A ? C : X) -> A ? B : C. */
2863 /* ??? This matches embedded conditions open-coded because genmatch
2864 would generate matching code for conditions in separate stmts only.
2865 The following is still important to merge the then and else arm cases
2866 coming from if-conversion. */
2867 (simplify
2868 (cnd @0 @1 (cnd @2 @3 @4))
2869 (if (COMPARISON_CLASS_P (@0)
2870 && COMPARISON_CLASS_P (@2)
2871 && invert_tree_comparison
2872 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
2873 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
2874 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
2875 (cnd @0 @1 @3)))
2876 (simplify
2877 (cnd @0 (cnd @1 @2 @3) @4)
2878 (if (COMPARISON_CLASS_P (@0)
2879 && COMPARISON_CLASS_P (@1)
2880 && invert_tree_comparison
2881 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
2882 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
2883 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
2884 (cnd @0 @3 @4)))
2885
2886 /* A ? B : B -> B. */
2887 (simplify
2888 (cnd @0 @1 @1)
2889 @1)
2890
2891 /* !A ? B : C -> A ? C : B. */
2892 (simplify
2893 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
2894 (cnd @0 @2 @1)))
2895
2896 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
2897 return all -1 or all 0 results. */
2898 /* ??? We could instead convert all instances of the vec_cond to negate,
2899 but that isn't necessarily a win on its own. */
2900 (simplify
2901 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2902 (if (VECTOR_TYPE_P (type)
2903 && known_eq (TYPE_VECTOR_SUBPARTS (type),
2904 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
2905 && (TYPE_MODE (TREE_TYPE (type))
2906 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2907 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2908
2909 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
2910 (simplify
2911 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2912 (if (VECTOR_TYPE_P (type)
2913 && known_eq (TYPE_VECTOR_SUBPARTS (type),
2914 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
2915 && (TYPE_MODE (TREE_TYPE (type))
2916 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2917 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2918
2919
2920 /* Simplifications of comparisons. */
2921
2922 /* See if we can reduce the magnitude of a constant involved in a
2923 comparison by changing the comparison code. This is a canonicalization
2924 formerly done by maybe_canonicalize_comparison_1. */
2925 (for cmp (le gt)
2926 acmp (lt ge)
2927 (simplify
2928 (cmp @0 INTEGER_CST@1)
2929 (if (tree_int_cst_sgn (@1) == -1)
2930 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
2931 (for cmp (ge lt)
2932 acmp (gt le)
2933 (simplify
2934 (cmp @0 INTEGER_CST@1)
2935 (if (tree_int_cst_sgn (@1) == 1)
2936 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
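/* E.g. x <= -3 becomes x < -2 and x >= 7 becomes x > 6, reducing the
   magnitude of the constant by one.  */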
2937
2938
2939 /* We can simplify a logical negation of a comparison to the
2940 inverted comparison. As we cannot compute an expression
2941 operator using invert_tree_comparison we have to simulate
2942 that with expression code iteration. */
2943 (for cmp (tcc_comparison)
2944 icmp (inverted_tcc_comparison)
2945 ncmp (inverted_tcc_comparison_with_nans)
2946 /* Ideally we'd like to combine the following two patterns
2947 and handle some more cases by using
2948 (logical_inverted_value (cmp @0 @1))
2949 here but for that genmatch would need to "inline" that.
2950 For now implement what forward_propagate_comparison did. */
2951 (simplify
2952 (bit_not (cmp @0 @1))
2953 (if (VECTOR_TYPE_P (type)
2954 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
2955 /* Comparison inversion may be impossible for trapping math;
2956 invert_tree_comparison will tell us. But we can't use
2957 a computed operator in the replacement tree, thus we have
2958 to play the trick below. */
2959 (with { enum tree_code ic = invert_tree_comparison
2960 (cmp, HONOR_NANS (@0)); }
2961 (if (ic == icmp)
2962 (icmp @0 @1)
2963 (if (ic == ncmp)
2964 (ncmp @0 @1))))))
2965 (simplify
2966 (bit_xor (cmp @0 @1) integer_truep)
2967 (with { enum tree_code ic = invert_tree_comparison
2968 (cmp, HONOR_NANS (@0)); }
2969 (if (ic == icmp)
2970 (icmp @0 @1)
2971 (if (ic == ncmp)
2972 (ncmp @0 @1))))))
2973
2974 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
2975 ??? The transformation is valid for the other operators if overflow
2976 is undefined for the type, but performing it here badly interacts
2977 with the transformation in fold_cond_expr_with_comparison which
2978 attempts to synthesize ABS_EXPR. */
2979 (for cmp (eq ne)
2980 (for sub (minus pointer_diff)
2981 (simplify
2982 (cmp (sub@2 @0 @1) integer_zerop)
2983 (if (single_use (@2))
2984 (cmp @0 @1)))))
2985
2986 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
2987 signed arithmetic case. That form is created by the compiler
2988 often enough for folding it to be of value. One example is in
2989 computing loop trip counts after Operator Strength Reduction. */
2990 (for cmp (simple_comparison)
2991 scmp (swapped_simple_comparison)
2992 (simplify
2993 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2994 /* Handle unfolded multiplication by zero. */
2995 (if (integer_zerop (@1))
2996 (cmp @1 @2)
2997 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2998 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2999 && single_use (@3))
3000 /* If @1 is negative we swap the sense of the comparison. */
3001 (if (tree_int_cst_sgn (@1) < 0)
3002 (scmp @0 @2)
3003 (cmp @0 @2))))))
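/* E.g. with signed overflow undefined, x * 5 > 0 -> x > 0 and
   x * -3 > 0 -> x < 0 (the comparison is swapped for a negative
   multiplier).  */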
3004
3005 /* Simplify comparison of something with itself. For IEEE
3006 floating-point, we can only do some of these simplifications. */
3007 (for cmp (eq ge le)
3008 (simplify
3009 (cmp @0 @0)
3010 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
3011 || ! HONOR_NANS (@0))
3012 { constant_boolean_node (true, type); }
3013 (if (cmp != EQ_EXPR)
3014 (eq @0 @0)))))
3015 (for cmp (ne gt lt)
3016 (simplify
3017 (cmp @0 @0)
3018 (if (cmp != NE_EXPR
3019 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
3020 || ! HONOR_NANS (@0))
3021 { constant_boolean_node (false, type); })))
3022 (for cmp (unle unge uneq)
3023 (simplify
3024 (cmp @0 @0)
3025 { constant_boolean_node (true, type); }))
3026 (for cmp (unlt ungt)
3027 (simplify
3028 (cmp @0 @0)
3029 (unordered @0 @0)))
3030 (simplify
3031 (ltgt @0 @0)
3032 (if (!flag_trapping_math)
3033 { constant_boolean_node (false, type); }))
3034
3035 /* Fold ~X op ~Y as Y op X. */
3036 (for cmp (simple_comparison)
3037 (simplify
3038 (cmp (bit_not@2 @0) (bit_not@3 @1))
3039 (if (single_use (@2) && single_use (@3))
3040 (cmp @1 @0))))
3041
3042 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
3043 (for cmp (simple_comparison)
3044 scmp (swapped_simple_comparison)
3045 (simplify
3046 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3047 (if (single_use (@2)
3048 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
3049 (scmp @0 (bit_not @1)))))
3050
3051 (for cmp (simple_comparison)
3052 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3053 (simplify
3054 (cmp (convert@2 @0) (convert? @1))
3055 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3056 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3057 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3058 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3059 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3060 (with
3061 {
3062 tree type1 = TREE_TYPE (@1);
3063 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3064 {
3065 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3066 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3067 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3068 type1 = float_type_node;
3069 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3070 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3071 type1 = double_type_node;
3072 }
3073 tree newtype
3074 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
3075 ? TREE_TYPE (@0) : type1);
3076 }
3077 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3078 (cmp (convert:newtype @0) (convert:newtype @1))))))
3079
3080 (simplify
3081 (cmp @0 REAL_CST@1)
3082 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
3083 (switch
3084 /* a CMP (-0) -> a CMP 0 */
3085 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3086 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3087 /* x != NaN is always true, other ops are always false. */
3088 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3089 && ! HONOR_SNANS (@1))
3090 { constant_boolean_node (cmp == NE_EXPR, type); })
3091 /* Fold comparisons against infinity. */
3092 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3093 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3094 (with
3095 {
3096 REAL_VALUE_TYPE max;
3097 enum tree_code code = cmp;
3098 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3099 if (neg)
3100 code = swap_tree_comparison (code);
3101 }
3102 (switch
3103 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
3104 (if (code == GT_EXPR
3105 && !(HONOR_NANS (@0) && flag_trapping_math))
3106 { constant_boolean_node (false, type); })
3107 (if (code == LE_EXPR)
3108 /* x <= +Inf is always true, if we don't care about NaNs. */
3109 (if (! HONOR_NANS (@0))
3110 { constant_boolean_node (true, type); }
3111 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3112 an "invalid" exception. */
3113 (if (!flag_trapping_math)
3114 (eq @0 @0))))
3115 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3116 for == this introduces an exception for x a NaN. */
3117 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3118 || code == GE_EXPR)
3119 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3120 (if (neg)
3121 (lt @0 { build_real (TREE_TYPE (@0), max); })
3122 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3123 /* x < +Inf is always equal to x <= DBL_MAX. */
3124 (if (code == LT_EXPR)
3125 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3126 (if (neg)
3127 (ge @0 { build_real (TREE_TYPE (@0), max); })
3128 (le @0 { build_real (TREE_TYPE (@0), max); }))))
3129 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3130 an exception for x a NaN so use an unordered comparison. */
3131 (if (code == NE_EXPR)
3132 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3133 (if (! HONOR_NANS (@0))
3134 (if (neg)
3135 (ge @0 { build_real (TREE_TYPE (@0), max); })
3136 (le @0 { build_real (TREE_TYPE (@0), max); }))
3137 (if (neg)
3138 (unge @0 { build_real (TREE_TYPE (@0), max); })
3139 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
3140
3141 /* If this is a comparison of a real constant with a PLUS_EXPR
3142 or a MINUS_EXPR of a real constant, we can convert it into a
3143 comparison with a revised real constant as long as no overflow
3144 occurs when unsafe_math_optimizations are enabled. */
3145 (if (flag_unsafe_math_optimizations)
3146 (for op (plus minus)
3147 (simplify
3148 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3149 (with
3150 {
3151 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3152 TREE_TYPE (@1), @2, @1);
3153 }
3154 (if (tem && !TREE_OVERFLOW (tem))
3155 (cmp @0 { tem; }))))))
3156
3157 /* Likewise, we can simplify a comparison of a real constant with
3158 a MINUS_EXPR whose first operand is also a real constant, i.e.
3159 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3160 floating-point types only if -fassociative-math is set. */
3161 (if (flag_associative_math)
3162 (simplify
3163 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
3164 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
3165 (if (tem && !TREE_OVERFLOW (tem))
3166 (cmp { tem; } @1)))))
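/* E.g. (illustrative only, requires -fassociative-math):
     10.0 - x > 4.0  ->  6.0 > x.  */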
3167
3168 /* Fold comparisons against built-in math functions. */
3169 (if (flag_unsafe_math_optimizations
3170 && ! flag_errno_math)
3171 (for sq (SQRT)
3172 (simplify
3173 (cmp (sq @0) REAL_CST@1)
3174 (switch
3175 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3176 (switch
3177 /* sqrt(x) ==, < or <= y is always false, if y is negative. */
3178 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
3179 { constant_boolean_node (false, type); })
3180 /* sqrt(x) > y is always true, if y is negative and we
3181 don't care about NaNs, i.e. negative values of x. */
3182 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3183 { constant_boolean_node (true, type); })
3184 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3185 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3186 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3187 (switch
3188 /* sqrt(x) < 0 is always false. */
3189 (if (cmp == LT_EXPR)
3190 { constant_boolean_node (false, type); })
3191 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3192 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3193 { constant_boolean_node (true, type); })
3194 /* sqrt(x) <= 0 -> x == 0. */
3195 (if (cmp == LE_EXPR)
3196 (eq @0 @1))
3197 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3198 == or !=. In the last case:
3199
3200 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3201
3202 if x is negative or NaN. Due to -funsafe-math-optimizations,
3203 the results for other x follow from natural arithmetic. */
3204 (cmp @0 @1)))
3205 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3206 (with
3207 {
3208 REAL_VALUE_TYPE c2;
3209 real_arithmetic (&c2, MULT_EXPR,
3210 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3211 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3212 }
3213 (if (REAL_VALUE_ISINF (c2))
3214 /* sqrt(x) > y is x == +Inf, when y is very large. */
3215 (if (HONOR_INFINITIES (@0))
3216 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3217 { constant_boolean_node (false, type); })
3218 /* sqrt(x) > c is the same as x > c*c. */
3219 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3220 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3221 (with
3222 {
3223 REAL_VALUE_TYPE c2;
3224 real_arithmetic (&c2, MULT_EXPR,
3225 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3226 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3227 }
3228 (if (REAL_VALUE_ISINF (c2))
3229 (switch
3230 /* sqrt(x) < y is always true, when y is a very large
3231 value and we don't care about NaNs or Infinities. */
3232 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3233 { constant_boolean_node (true, type); })
3234 /* sqrt(x) < y is x != +Inf when y is very large and we
3235 don't care about NaNs. */
3236 (if (! HONOR_NANS (@0))
3237 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3238 /* sqrt(x) < y is x >= 0 when y is very large and we
3239 don't care about Infinities. */
3240 (if (! HONOR_INFINITIES (@0))
3241 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3242 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3243 (if (GENERIC)
3244 (truth_andif
3245 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3246 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3247 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3248 (if (! HONOR_NANS (@0))
3249 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3250 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3251 (if (GENERIC)
3252 (truth_andif
3253 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3254 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3255 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3256 (simplify
3257 (cmp (sq @0) (sq @1))
3258 (if (! HONOR_NANS (@0))
3259 (cmp @0 @1))))))
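/* Illustrative examples of the sqrt patterns above (a sketch, assuming
   -funsafe-math-optimizations, -fno-math-errno and that NaNs need not be
   honored):
     sqrt (x) > 3.0       ->  x > 9.0
     sqrt (x) < -1.0      ->  false
     sqrt (x) <= 0.0      ->  x == 0.0
     sqrt (x) < sqrt (y)  ->  x < y.  */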
3260
3261 /* Optimize various special cases of (FTYPE) N CMP CST. */
3262 (for cmp (lt le eq ne ge gt)
3263 icmp (le le eq ne ge ge)
3264 (simplify
3265 (cmp (float @0) REAL_CST@1)
3266 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3267 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3268 (with
3269 {
3270 tree itype = TREE_TYPE (@0);
3271 signop isign = TYPE_SIGN (itype);
3272 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3273 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3274 /* Be careful to preserve any potential exceptions due to
3275 NaNs. qNaNs are ok in == or != context.
3276 TODO: relax under -fno-trapping-math or
3277 -fno-signaling-nans. */
3278 bool exception_p
3279 = real_isnan (cst) && (cst->signalling
3280 || (cmp != EQ_EXPR && cmp != NE_EXPR));
3281 /* INT?_MIN is power-of-two so it takes
3282 only one mantissa bit. */
3283 bool signed_p = isign == SIGNED;
3284 bool itype_fits_ftype_p
3285 = TYPE_PRECISION (itype) - signed_p <= significand_size (fmt);
3286 }
3287 /* TODO: allow non-fitting itype and SNaNs when
3288 -fno-trapping-math. */
3289 (if (itype_fits_ftype_p && ! exception_p)
3290 (with
3291 {
3292 REAL_VALUE_TYPE imin, imax;
3293 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3294 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3295
3296 REAL_VALUE_TYPE icst;
3297 if (cmp == GT_EXPR || cmp == GE_EXPR)
3298 real_ceil (&icst, fmt, cst);
3299 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3300 real_floor (&icst, fmt, cst);
3301 else
3302 real_trunc (&icst, fmt, cst);
3303
3304 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
3305
3306 bool overflow_p = false;
3307 wide_int icst_val
3308 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3309 }
3310 (switch
3311 /* Optimize cases when CST is outside of ITYPE's range. */
3312 (if (real_compare (LT_EXPR, cst, &imin))
3313 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3314 type); })
3315 (if (real_compare (GT_EXPR, cst, &imax))
3316 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3317 type); })
3318 /* Remove cast if CST is an integer representable by ITYPE. */
3319 (if (cst_int_p)
3320 (cmp @0 { gcc_assert (!overflow_p);
3321 wide_int_to_tree (itype, icst_val); })
3322 )
3323 /* When CST is fractional, optimize
3324 (FTYPE) N == CST -> 0
3325 (FTYPE) N != CST -> 1. */
3326 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3327 { constant_boolean_node (cmp == NE_EXPR, type); })
3328       /* Otherwise replace with a sensible integer constant. */
3329 (with
3330 {
3331 gcc_checking_assert (!overflow_p);
3332 }
3333 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
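/* A sketch of the effect for a 32-bit int i compared against double
   constants (int fits in double's significand, no signaling NaNs involved):
     (double) i == 0.5          ->  0
     (double) i <= 2.5          ->  i <= 2
     (double) i > 2147483648.0  ->  0 (the constant is above INT_MAX).  */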
3334
3335 /* Fold A /[ex] B CMP C to A CMP B * C. */
3336 (for cmp (eq ne)
3337 (simplify
3338 (cmp (exact_div @0 @1) INTEGER_CST@2)
3339 (if (!integer_zerop (@1))
3340 (if (wi::to_wide (@2) == 0)
3341 (cmp @0 @2)
3342 (if (TREE_CODE (@1) == INTEGER_CST)
3343 (with
3344 {
3345 bool ovf;
3346 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3347 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3348 }
3349 (if (ovf)
3350 { constant_boolean_node (cmp == NE_EXPR, type); }
3351 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3352 (for cmp (lt le gt ge)
3353 (simplify
3354 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
3355 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3356 (with
3357 {
3358 bool ovf;
3359 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3360 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3361 }
3362 (if (ovf)
3363 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3364 TYPE_SIGN (TREE_TYPE (@2)))
3365 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3366 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
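/* Exact divisions mostly arise from pointer subtraction; e.g. with
   int *p, *q and 4-byte int, p - q is an EXACT_DIV_EXPR of the byte
   difference by 4, so (illustrative only, assuming no overflow):
     p - q == 2  ->  byte difference == 8
     p - q > 3   ->  byte difference > 12.  */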
3367
3368 /* An unordered comparison tests whether either argument is a NaN. */
3369 (simplify
3370 (bit_ior (unordered @0 @0) (unordered @1 @1))
3371 (if (types_match (@0, @1))
3372 (unordered @0 @1)))
3373 (simplify
3374 (bit_and (ordered @0 @0) (ordered @1 @1))
3375 (if (types_match (@0, @1))
3376 (ordered @0 @1)))
3377 (simplify
3378 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3379 @2)
3380 (simplify
3381 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3382 @2)
3383
3384 /* Simple range test simplifications. */
3385 /* A < B || A >= B -> true. */
3386 (for test1 (lt le le le ne ge)
3387 test2 (ge gt ge ne eq ne)
3388 (simplify
3389 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3390 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3391 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3392 { constant_boolean_node (true, type); })))
3393 /* A < B && A >= B -> false. */
3394 (for test1 (lt lt lt le ne eq)
3395 test2 (ge gt eq gt eq gt)
3396 (simplify
3397 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3398 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3399 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3400 { constant_boolean_node (false, type); })))
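/* E.g. for integral a and b:
     a <= b || a != b  ->  true
     a <  b && a == b  ->  false.  */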
3401
3402 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3403 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3404
3405 Note that comparisons
3406 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3407 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3408    will be canonicalized to the forms above, so there's no need to
3409 consider them here.
3410 */
3411
3412 (for cmp (le gt)
3413 eqcmp (eq ne)
3414 (simplify
3415 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3416 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3417 (with
3418 {
3419 tree ty = TREE_TYPE (@0);
3420 unsigned prec = TYPE_PRECISION (ty);
3421 wide_int mask = wi::to_wide (@2, prec);
3422 wide_int rhs = wi::to_wide (@3, prec);
3423 signop sgn = TYPE_SIGN (ty);
3424 }
3425 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3426 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3427 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3428 { build_zero_cst (ty); }))))))
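/* E.g. (illustrative only, here N is 4 and K is 2):
     (x & 15) <= 3  ->  (x & 12) == 0
     (x & 15) >  3  ->  (x & 12) != 0.  */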
3429
3430 /* -A CMP -B -> B CMP A. */
3431 (for cmp (tcc_comparison)
3432 scmp (swapped_tcc_comparison)
3433 (simplify
3434 (cmp (negate @0) (negate @1))
3435 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3436 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3437 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3438 (scmp @0 @1)))
3439 (simplify
3440 (cmp (negate @0) CONSTANT_CLASS_P@1)
3441 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3442 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3443 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3444 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
3445 (if (tem && !TREE_OVERFLOW (tem))
3446 (scmp @0 { tem; }))))))
3447
3448 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3449 (for op (eq ne)
3450 (simplify
3451 (op (abs @0) zerop@1)
3452 (op @0 @1)))
3453
3454 /* From fold_sign_changed_comparison and fold_widened_comparison.
3455 FIXME: the lack of symmetry is disturbing. */
3456 (for cmp (simple_comparison)
3457 (simplify
3458 (cmp (convert@0 @00) (convert?@1 @10))
3459 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3460 /* Disable this optimization if we're casting a function pointer
3461 type on targets that require function pointer canonicalization. */
3462 && !(targetm.have_canonicalize_funcptr_for_compare ()
3463 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
3464 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
3465 && single_use (@0))
3466 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3467 && (TREE_CODE (@10) == INTEGER_CST
3468 || @1 != @10)
3469 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3470 || cmp == NE_EXPR
3471 || cmp == EQ_EXPR)
3472 && !POINTER_TYPE_P (TREE_TYPE (@00)))
3473     /* ??? The special-casing of the INTEGER_CST conversion was in the
3474        original code and is kept here to avoid a spurious overflow flag on
3475        the resulting constant which fold_convert produces. */
3476 (if (TREE_CODE (@1) == INTEGER_CST)
3477 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3478 TREE_OVERFLOW (@1)); })
3479 (cmp @00 (convert @1)))
3480
3481 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3482 /* If possible, express the comparison in the shorter mode. */
3483 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
3484 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3485 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3486 && TYPE_UNSIGNED (TREE_TYPE (@00))))
3487 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3488 || ((TYPE_PRECISION (TREE_TYPE (@00))
3489 >= TYPE_PRECISION (TREE_TYPE (@10)))
3490 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3491 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3492 || (TREE_CODE (@10) == INTEGER_CST
3493 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3494 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3495 (cmp @00 (convert @10))
3496 (if (TREE_CODE (@10) == INTEGER_CST
3497 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3498 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3499 (with
3500 {
3501 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3502 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3503 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3504 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3505 }
3506 (if (above || below)
3507 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3508 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3509 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3510 { constant_boolean_node (above ? true : false, type); }
3511 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3512 { constant_boolean_node (above ? false : true, type); }))))))))))))
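/* For instance (a sketch, for unsigned char c widened to int):
     (int) c == 300  ->  false
     (int) c <  300  ->  true
     (int) c >  100  ->  c > 100, performed in the narrower type.  */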
3513
3514 (for cmp (eq ne)
3515 /* A local variable can never be pointed to by
3516 the default SSA name of an incoming parameter.
3517 SSA names are canonicalized to 2nd place. */
3518 (simplify
3519 (cmp addr@0 SSA_NAME@1)
3520 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3521 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3522 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3523 (if (TREE_CODE (base) == VAR_DECL
3524 && auto_var_in_fn_p (base, current_function_decl))
3525 (if (cmp == NE_EXPR)
3526 { constant_boolean_node (true, type); }
3527 { constant_boolean_node (false, type); }))))))
3528
3529 /* Equality compare simplifications from fold_binary */
3530 (for cmp (eq ne)
3531
3532 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3533 Similarly for NE_EXPR. */
3534 (simplify
3535 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3536 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
3537 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
3538 { constant_boolean_node (cmp == NE_EXPR, type); }))
3539
3540 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3541 (simplify
3542 (cmp (bit_xor @0 @1) integer_zerop)
3543 (cmp @0 @1))
3544
3545 /* (X ^ Y) == Y becomes X == 0.
3546 Likewise (X ^ Y) == X becomes Y == 0. */
3547 (simplify
3548 (cmp:c (bit_xor:c @0 @1) @0)
3549 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3550
3551 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3552 (simplify
3553 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3554 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
3555 (cmp @0 (bit_xor @1 (convert @2)))))
3556
3557 (simplify
3558 (cmp (convert? addr@0) integer_zerop)
3559 (if (tree_single_nonzero_warnv_p (@0, NULL))
3560 { constant_boolean_node (cmp == NE_EXPR, type); })))
3561
3562 /* If we have (A & C) == C where C is a power of 2, convert this into
3563 (A & C) != 0. Similarly for NE_EXPR. */
3564 (for cmp (eq ne)
3565 icmp (ne eq)
3566 (simplify
3567 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3568 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
3569
3570 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3571 convert this into a shift followed by ANDing with D. */
3572 (simplify
3573 (cond
3574 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
3575 INTEGER_CST@2 integer_zerop)
3576 (if (integer_pow2p (@2))
3577 (with {
3578 int shift = (wi::exact_log2 (wi::to_wide (@2))
3579 - wi::exact_log2 (wi::to_wide (@1)));
3580 }
3581 (if (shift > 0)
3582 (bit_and
3583 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3584 (bit_and
3585 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
3586 @2)))))
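/* E.g. (illustrative only):
     (x & 4) != 0 ? 16 : 0   ->  (x << 2) & 16
     (x & 16) != 0 ? 4 : 0   ->  (x >> 2) & 4.  */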
3587
3588 /* If we have (A & C) != 0 where C is the sign bit of A, convert
3589 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3590 (for cmp (eq ne)
3591 ncmp (ge lt)
3592 (simplify
3593 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3594 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3595 && type_has_mode_precision_p (TREE_TYPE (@0))
3596 && element_precision (@2) >= element_precision (@0)
3597 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
3598 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3599 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3600
3601 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
3602 this into a right shift or sign extension followed by ANDing with C. */
3603 (simplify
3604 (cond
3605 (lt @0 integer_zerop)
3606 INTEGER_CST@1 integer_zerop)
3607 (if (integer_pow2p (@1)
3608 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
3609 (with {
3610 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
3611 }
3612 (if (shift >= 0)
3613 (bit_and
3614 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3615 @1)
3616      /* Otherwise the result type must be wider than TREE_TYPE (@0) and pure
3617 sign extension followed by AND with C will achieve the effect. */
3618 (bit_and (convert @0) @1)))))
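/* E.g. for a 32-bit signed int x (illustrative only):
     x < 0 ? 16 : 0  ->  (x >> 27) & 16.  */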
3619
3620 /* When the addresses are not directly of decls, compare base and offset.
3621    This implements some remaining parts of fold_comparison address
3622    comparisons, though not all of them.  Still it is good enough to keep
3623    fold_stmt from regressing when it does not dispatch to fold_binary. */
3624 (for cmp (simple_comparison)
3625 (simplify
3626 (cmp (convert1?@2 addr@0) (convert2? addr@1))
3627 (with
3628 {
3629 poly_int64 off0, off1;
3630 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3631 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3632 if (base0 && TREE_CODE (base0) == MEM_REF)
3633 {
3634 off0 += mem_ref_offset (base0).force_shwi ();
3635 base0 = TREE_OPERAND (base0, 0);
3636 }
3637 if (base1 && TREE_CODE (base1) == MEM_REF)
3638 {
3639 off1 += mem_ref_offset (base1).force_shwi ();
3640 base1 = TREE_OPERAND (base1, 0);
3641 }
3642 }
3643 (if (base0 && base1)
3644 (with
3645 {
3646 int equal = 2;
3647 /* Punt in GENERIC on variables with value expressions;
3648 the value expressions might point to fields/elements
3649 of other vars etc. */
3650 if (GENERIC
3651 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3652 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3653 ;
3654 else if (decl_in_symtab_p (base0)
3655 && decl_in_symtab_p (base1))
3656 equal = symtab_node::get_create (base0)
3657 ->equal_address_to (symtab_node::get_create (base1));
3658 else if ((DECL_P (base0)
3659 || TREE_CODE (base0) == SSA_NAME
3660 || TREE_CODE (base0) == STRING_CST)
3661 && (DECL_P (base1)
3662 || TREE_CODE (base1) == SSA_NAME
3663 || TREE_CODE (base1) == STRING_CST))
3664 equal = (base0 == base1);
3665 }
3666 (if (equal == 1
3667 && (cmp == EQ_EXPR || cmp == NE_EXPR
3668 /* If the offsets are equal we can ignore overflow. */
3669 || known_eq (off0, off1)
3670 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3671 /* Or if we compare using pointers to decls or strings. */
3672 || (POINTER_TYPE_P (TREE_TYPE (@2))
3673 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
3674 (switch
3675 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3676 { constant_boolean_node (known_eq (off0, off1), type); })
3677 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3678 { constant_boolean_node (known_ne (off0, off1), type); })
3679 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
3680 { constant_boolean_node (known_lt (off0, off1), type); })
3681 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
3682 { constant_boolean_node (known_le (off0, off1), type); })
3683 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
3684 { constant_boolean_node (known_ge (off0, off1), type); })
3685 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
3686 { constant_boolean_node (known_gt (off0, off1), type); }))
3687 (if (equal == 0
3688 && DECL_P (base0) && DECL_P (base1)
3689 /* If we compare this as integers require equal offset. */
3690 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
3691 || known_eq (off0, off1)))
3692 (switch
3693 (if (cmp == EQ_EXPR)
3694 { constant_boolean_node (false, type); })
3695 (if (cmp == NE_EXPR)
3696 { constant_boolean_node (true, type); })))))))))
3697
3698 /* Simplify pointer equality compares using PTA. */
3699 (for neeq (ne eq)
3700 (simplify
3701 (neeq @0 @1)
3702 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3703 && ptrs_compare_unequal (@0, @1))
3704 { constant_boolean_node (neeq != EQ_EXPR, type); })))
3705
3706 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
3707    and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3708    Disable the transform if either operand is a pointer to a function:
3709    this broke pr22051-2.c for arm, where function pointer
3710    canonicalization is not wanted. */
3711
3712 (for cmp (ne eq)
3713 (simplify
3714 (cmp (convert @0) INTEGER_CST@1)
3715 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
3716 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3717 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3718 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3719 && POINTER_TYPE_P (TREE_TYPE (@1))
3720 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
3721 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
3722 (cmp @0 (convert @1)))))
3723
3724 /* Non-equality compare simplifications from fold_binary */
3725 (for cmp (lt gt le ge)
3726 /* Comparisons with the highest or lowest possible integer of
3727 the specified precision will have known values. */
3728 (simplify
3729 (cmp (convert?@2 @0) INTEGER_CST@1)
3730 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3731 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3732 (with
3733 {
3734 tree arg1_type = TREE_TYPE (@1);
3735 unsigned int prec = TYPE_PRECISION (arg1_type);
3736 wide_int max = wi::max_value (arg1_type);
3737 wide_int signed_max = wi::max_value (prec, SIGNED);
3738 wide_int min = wi::min_value (arg1_type);
3739 }
3740 (switch
3741 (if (wi::to_wide (@1) == max)
3742 (switch
3743 (if (cmp == GT_EXPR)
3744 { constant_boolean_node (false, type); })
3745 (if (cmp == GE_EXPR)
3746 (eq @2 @1))
3747 (if (cmp == LE_EXPR)
3748 { constant_boolean_node (true, type); })
3749 (if (cmp == LT_EXPR)
3750 (ne @2 @1))))
3751 (if (wi::to_wide (@1) == min)
3752 (switch
3753 (if (cmp == LT_EXPR)
3754 { constant_boolean_node (false, type); })
3755 (if (cmp == LE_EXPR)
3756 (eq @2 @1))
3757 (if (cmp == GE_EXPR)
3758 { constant_boolean_node (true, type); })
3759 (if (cmp == GT_EXPR)
3760 (ne @2 @1))))
3761 (if (wi::to_wide (@1) == max - 1)
3762 (switch
3763 (if (cmp == GT_EXPR)
3764 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))
3765 (if (cmp == LE_EXPR)
3766 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
3767 (if (wi::to_wide (@1) == min + 1)
3768 (switch
3769 (if (cmp == GE_EXPR)
3770 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))
3771 (if (cmp == LT_EXPR)
3772 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
3773 (if (wi::to_wide (@1) == signed_max
3774 && TYPE_UNSIGNED (arg1_type)
3775 /* We will flip the signedness of the comparison operator
3776 associated with the mode of @1, so the sign bit is
3777 specified by this mode. Check that @1 is the signed
3778 max associated with this sign bit. */
3779 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
3780 /* signed_type does not work on pointer types. */
3781 && INTEGRAL_TYPE_P (arg1_type))
3782 /* The following case also applies to X < signed_max+1
3783       and X >= signed_max+1 because of previous transformations. */
3784 (if (cmp == LE_EXPR || cmp == GT_EXPR)
3785 (with { tree st = signed_type_for (arg1_type); }
3786 (if (cmp == LE_EXPR)
3787 (ge (convert:st @0) { build_zero_cst (st); })
3788 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
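/* For example, with unsigned int x (illustrative sketch):
     x <= UINT_MAX  ->  true
     x <  UINT_MAX  ->  x != UINT_MAX
     x <= INT_MAX   ->  (int) x >= 0
     x >  INT_MAX   ->  (int) x < 0.  */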
3789
3790 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
3791 /* If the second operand is NaN, the result is constant. */
3792 (simplify
3793 (cmp @0 REAL_CST@1)
3794 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3795 && (cmp != LTGT_EXPR || ! flag_trapping_math))
3796 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
3797 ? false : true, type); })))
3798
3799 /* bool_var != 0 becomes bool_var. */
3800 (simplify
3801 (ne @0 integer_zerop)
3802 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3803 && types_match (type, TREE_TYPE (@0)))
3804 (non_lvalue @0)))
3805 /* bool_var == 1 becomes bool_var. */
3806 (simplify
3807 (eq @0 integer_onep)
3808 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3809 && types_match (type, TREE_TYPE (@0)))
3810 (non_lvalue @0)))
3811 /* Do not handle
3812 bool_var == 0 becomes !bool_var or
3813 bool_var != 1 becomes !bool_var
3814    here because that is only good in assignment context as long as we
3815    require a tcc_comparison in GIMPLE_CONDs: there we would replace
3816    if (x == 0) with tem = ~x; if (tem != 0), which is clearly less
3817    optimal and which we'll transform again in forwprop. */
3818
3819 /* When one argument is a constant, overflow detection can be simplified.
3820 Currently restricted to single use so as not to interfere too much with
3821 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
3822 A + CST CMP A -> A CMP' CST' */
3823 (for cmp (lt le ge gt)
3824 out (gt gt le le)
3825 (simplify
3826 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
3827 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3828 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
3829 && wi::to_wide (@1) != 0
3830 && single_use (@2))
3831 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
3832 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
3833 wi::max_value (prec, UNSIGNED)
3834 - wi::to_wide (@1)); })))))
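/* E.g. for unsigned int x (illustrative only, single use of the addition
   assumed):
     x + 10 <  x  ->  x > UINT_MAX - 10
     x + 10 >= x  ->  x <= UINT_MAX - 10.  */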
3835
3836 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
3837 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
3838 expects the long form, so we restrict the transformation for now. */
3839 (for cmp (gt le)
3840 (simplify
3841 (cmp:c (minus@2 @0 @1) @0)
3842 (if (single_use (@2)
3843 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3844 && TYPE_UNSIGNED (TREE_TYPE (@0))
3845 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3846 (cmp @1 @0))))
3847
3848 /* Testing for overflow is unnecessary if we already know the result. */
3849 /* A - B > A */
3850 (for cmp (gt le)
3851 out (ne eq)
3852 (simplify
3853 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3854 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3855 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3856 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3857 /* A + B < A */
3858 (for cmp (lt ge)
3859 out (ne eq)
3860 (simplify
3861 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3862 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3863 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3864 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3865
3866 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
3867 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
3868 (for cmp (lt ge)
3869 out (ne eq)
3870 (simplify
3871 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
3872 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
3873 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
3874 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
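/* E.g. the classical overflow check for scalar unsigned a and b
   (illustrative sketch, division assumed single-use):
     a > UINT_MAX / b  ->  imagpart of IFN_MUL_OVERFLOW (a, b) != 0.  */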
3875
3876 /* Simplification of math builtins. These rules must all be optimizations
3877 as well as IL simplifications. If there is a possibility that the new
3878 form could be a pessimization, the rule should go in the canonicalization
3879 section that follows this one.
3880
3881 Rules can generally go in this section if they satisfy one of
3882 the following:
3883
3884 - the rule describes an identity
3885
3886 - the rule replaces calls with something as simple as addition or
3887 multiplication
3888
3889 - the rule contains unary calls only and simplifies the surrounding
3890 arithmetic. (The idea here is to exclude non-unary calls in which
3891 one operand is constant and in which the call is known to be cheap
3892 when the operand has that value.) */
3893
3894 (if (flag_unsafe_math_optimizations)
3895 /* Simplify sqrt(x) * sqrt(x) -> x. */
3896 (simplify
3897 (mult (SQRT_ALL@1 @0) @1)
3898 (if (!HONOR_SNANS (type))
3899 @0))
3900
3901 (for op (plus minus)
3902 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
3903 (simplify
3904 (op (rdiv @0 @1)
3905 (rdiv @2 @1))
3906 (rdiv (op @0 @2) @1)))
3907
3908 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
3909 (for root (SQRT CBRT)
3910 (simplify
3911 (mult (root:s @0) (root:s @1))
3912 (root (mult @0 @1))))
3913
3914 /* Simplify expN(x) * expN(y) -> expN(x+y). */
3915 (for exps (EXP EXP2 EXP10 POW10)
3916 (simplify
3917 (mult (exps:s @0) (exps:s @1))
3918 (exps (plus @0 @1))))
3919
3920 /* Simplify a/root(b/c) into a*root(c/b). */
3921 (for root (SQRT CBRT)
3922 (simplify
3923 (rdiv @0 (root:s (rdiv:s @1 @2)))
3924 (mult @0 (root (rdiv @2 @1)))))
3925
3926 /* Simplify x/expN(y) into x*expN(-y). */
3927 (for exps (EXP EXP2 EXP10 POW10)
3928 (simplify
3929 (rdiv @0 (exps:s @1))
3930 (mult @0 (exps (negate @1)))))
3931
3932 (for logs (LOG LOG2 LOG10 LOG10)
3933 exps (EXP EXP2 EXP10 POW10)
3934 /* logN(expN(x)) -> x. */
3935 (simplify
3936 (logs (exps @0))
3937 @0)
3938 /* expN(logN(x)) -> x. */
3939 (simplify
3940 (exps (logs @0))
3941 @0))
3942
3943 /* Optimize logN(func()) for various exponential functions. We
3944 want to determine the value "x" and the power "exponent" in
3945 order to transform logN(x**exponent) into exponent*logN(x). */
3946 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
3947 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
3948 (simplify
3949 (logs (exps @0))
3950 (if (SCALAR_FLOAT_TYPE_P (type))
3951 (with {
3952 tree x;
3953 switch (exps)
3954 {
3955 CASE_CFN_EXP:
3956 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
3957 x = build_real_truncate (type, dconst_e ());
3958 break;
3959 CASE_CFN_EXP2:
3960 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
3961 x = build_real (type, dconst2);
3962 break;
3963 CASE_CFN_EXP10:
3964 CASE_CFN_POW10:
3965 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
3966 {
3967 REAL_VALUE_TYPE dconst10;
3968 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
3969 x = build_real (type, dconst10);
3970 }
3971 break;
3972 default:
3973 gcc_unreachable ();
3974 }
3975 }
3976 (mult (logs { x; }) @0)))))
3977
3978 (for logs (LOG LOG
3979 LOG2 LOG2
3980 LOG10 LOG10)
3981 exps (SQRT CBRT)
3982 (simplify
3983 (logs (exps @0))
3984 (if (SCALAR_FLOAT_TYPE_P (type))
3985 (with {
3986 tree x;
3987 switch (exps)
3988 {
3989 CASE_CFN_SQRT:
3990 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
3991 x = build_real (type, dconsthalf);
3992 break;
3993 CASE_CFN_CBRT:
3994 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
3995 x = build_real_truncate (type, dconst_third ());
3996 break;
3997 default:
3998 gcc_unreachable ();
3999 }
4000 }
4001 (mult { x; } (logs @0))))))
4002
4003 /* logN(pow(x,exponent)) -> exponent*logN(x). */
4004 (for logs (LOG LOG2 LOG10)
4005 pows (POW)
4006 (simplify
4007 (logs (pows @0 @1))
4008 (mult @1 (logs @0))))
4009
4010 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4011 or if C is a positive power of 2,
4012 pow(C,x) -> exp2(log2(C)*x). */
4013 #if GIMPLE
4014 (for pows (POW)
4015 exps (EXP)
4016 logs (LOG)
4017 exp2s (EXP2)
4018 log2s (LOG2)
4019 (simplify
4020 (pows REAL_CST@0 @1)
4021 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4022 && real_isfinite (TREE_REAL_CST_PTR (@0))
4023 /* As libmvec doesn't have a vectorized exp2, defer optimizing
4024 the use_exp2 case until after vectorization. It seems actually
4025 beneficial for all constants to postpone this until later,
4026 because exp(log(C)*x), while faster, will have worse precision
4027 and if x folds into a constant too, that is unnecessary
4028 pessimization. */
4029 && canonicalize_math_after_vectorization_p ())
4030 (with {
4031 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4032 bool use_exp2 = false;
4033 if (targetm.libc_has_function (function_c99_misc)
4034 && value->cl == rvc_normal)
4035 {
4036 REAL_VALUE_TYPE frac_rvt = *value;
4037 SET_REAL_EXP (&frac_rvt, 1);
4038 if (real_equal (&frac_rvt, &dconst1))
4039 use_exp2 = true;
4040 }
4041 }
4042 (if (!use_exp2)
4043 (if (optimize_pow_to_exp (@0, @1))
4044 (exps (mult (logs @0) @1)))
4045 (exp2s (mult (log2s @0) @1)))))))
4046 #endif
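/* Illustrative sketch (GIMPLE only, -funsafe-math-optimizations, deferred
   until after vectorization, C finite and positive): pow (3.0, x) can
   become exp (log (3.0) * x), while pow (8.0, x), whose base is a power
   of two, can use exp2 (log2 (8.0) * x) when the C99 libc functions are
   available.  */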
4047
4048 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
4049 (for pows (POW)
4050 exps (EXP EXP2 EXP10 POW10)
4051 logs (LOG LOG2 LOG10 LOG10)
4052 (simplify
4053 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4054 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4055 && real_isfinite (TREE_REAL_CST_PTR (@0)))
4056 (exps (plus (mult (logs @0) @1) @2)))))
4057
4058 (for sqrts (SQRT)
4059 cbrts (CBRT)
4060 pows (POW)
4061 exps (EXP EXP2 EXP10 POW10)
4062 /* sqrt(expN(x)) -> expN(x*0.5). */
4063 (simplify
4064 (sqrts (exps @0))
4065 (exps (mult @0 { build_real (type, dconsthalf); })))
4066 /* cbrt(expN(x)) -> expN(x/3). */
4067 (simplify
4068 (cbrts (exps @0))
4069 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4070 /* pow(expN(x), y) -> expN(x*y). */
4071 (simplify
4072 (pows (exps @0) @1)
4073 (exps (mult @0 @1))))
4074
4075 /* tan(atan(x)) -> x. */
4076 (for tans (TAN)
4077 atans (ATAN)
4078 (simplify
4079 (tans (atans @0))
4080 @0)))
4081
4082 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
4083 (simplify
4084 (CABS (complex:C @0 real_zerop@1))
4085 (abs @0))
4086
4087 /* trunc(trunc(x)) -> trunc(x), etc. */
4088 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4089 (simplify
4090 (fns (fns @0))
4091 (fns @0)))
4092 /* f(x) -> x if x is integer valued and f does nothing for such values. */
4093 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4094 (simplify
4095 (fns integer_valued_real_p@0)
4096 @0))
4097
4098 /* hypot(x,0) and hypot(0,x) -> abs(x). */
4099 (simplify
4100 (HYPOT:c @0 real_zerop@1)
4101 (abs @0))
4102
4103 /* pow(1,x) -> 1. */
4104 (simplify
4105 (POW real_onep@0 @1)
4106 @0)
4107
4108 (simplify
4109 /* copysign(x,x) -> x. */
4110 (COPYSIGN_ALL @0 @0)
4111 @0)
4112
4113 (simplify
4114 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
4115 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
4116 (abs @0))
4117
4118 (for scale (LDEXP SCALBN SCALBLN)
4119 /* ldexp(0, x) -> 0. */
4120 (simplify
4121 (scale real_zerop@0 @1)
4122 @0)
4123 /* ldexp(x, 0) -> x. */
4124 (simplify
4125 (scale @0 integer_zerop@1)
4126 @0)
4127 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
4128 (simplify
4129 (scale REAL_CST@0 @1)
4130 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
4131 @0)))
4132
4133 /* Canonicalization of sequences of math builtins. These rules represent
4134 IL simplifications but are not necessarily optimizations.
4135
4136 The sincos pass is responsible for picking "optimal" implementations
4137 of math builtins, which may be more complicated and can sometimes go
4138 the other way, e.g. converting pow into a sequence of sqrts.
4139 We only want to do these canonicalizations before the pass has run. */
4140
4141 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4142 /* Simplify tan(x) * cos(x) -> sin(x). */
4143 (simplify
4144 (mult:c (TAN:s @0) (COS:s @0))
4145 (SIN @0))
4146
4147 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4148 (simplify
4149 (mult:c @0 (POW:s @0 REAL_CST@1))
4150 (if (!TREE_OVERFLOW (@1))
4151 (POW @0 (plus @1 { build_one_cst (type); }))))
4152
4153 /* Simplify sin(x) / cos(x) -> tan(x). */
4154 (simplify
4155 (rdiv (SIN:s @0) (COS:s @0))
4156 (TAN @0))
4157
4158 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4159 (simplify
4160 (rdiv (COS:s @0) (SIN:s @0))
4161 (rdiv { build_one_cst (type); } (TAN @0)))
4162
4163 /* Simplify sin(x) / tan(x) -> cos(x). */
4164 (simplify
4165 (rdiv (SIN:s @0) (TAN:s @0))
4166 (if (! HONOR_NANS (@0)
4167 && ! HONOR_INFINITIES (@0))
4168 (COS @0)))
4169
4170 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4171 (simplify
4172 (rdiv (TAN:s @0) (SIN:s @0))
4173 (if (! HONOR_NANS (@0)
4174 && ! HONOR_INFINITIES (@0))
4175 (rdiv { build_one_cst (type); } (COS @0))))
4176
4177 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4178 (simplify
4179 (mult (POW:s @0 @1) (POW:s @0 @2))
4180 (POW @0 (plus @1 @2)))
4181
4182 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4183 (simplify
4184 (mult (POW:s @0 @1) (POW:s @2 @1))
4185 (POW (mult @0 @2) @1))
4186
4187 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4188 (simplify
4189 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4190 (POWI (mult @0 @2) @1))
4191
4192 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4193 (simplify
4194 (rdiv (POW:s @0 REAL_CST@1) @0)
4195 (if (!TREE_OVERFLOW (@1))
4196 (POW @0 (minus @1 { build_one_cst (type); }))))
4197
4198 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4199 (simplify
4200 (rdiv @0 (POW:s @1 @2))
4201 (mult @0 (POW @1 (negate @2))))
4202
4203 (for sqrts (SQRT)
4204 cbrts (CBRT)
4205 pows (POW)
4206 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4207 (simplify
4208 (sqrts (sqrts @0))
4209 (pows @0 { build_real (type, dconst_quarter ()); }))
4210 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4211 (simplify
4212 (sqrts (cbrts @0))
4213 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4214 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4215 (simplify
4216 (cbrts (sqrts @0))
4217 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4218 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4219 (simplify
4220 (cbrts (cbrts tree_expr_nonnegative_p@0))
4221 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4222 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4223 (simplify
4224 (sqrts (pows @0 @1))
4225 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4226 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4227 (simplify
4228 (cbrts (pows tree_expr_nonnegative_p@0 @1))
4229 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4230 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4231 (simplify
4232 (pows (sqrts @0) @1)
4233 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4234 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4235 (simplify
4236 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4237 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4238 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4239 (simplify
4240 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4241 (pows @0 (mult @1 @2))))
4242
4243 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4244 (simplify
4245 (CABS (complex @0 @0))
4246 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4247
4248 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4249 (simplify
4250 (HYPOT @0 @0)
4251 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4252
4253 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4254 (for cexps (CEXP)
4255 exps (EXP)
4256 cexpis (CEXPI)
4257 (simplify
4258 (cexps compositional_complex@0)
4259 (if (targetm.libc_has_function (function_c99_math_complex))
4260 (complex
4261 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4262 (mult @1 (imagpart @2)))))))
4263
4264 (if (canonicalize_math_p ())
4265 /* floor(x) -> trunc(x) if x is nonnegative. */
4266 (for floors (FLOOR_ALL)
4267 truncs (TRUNC_ALL)
4268 (simplify
4269 (floors tree_expr_nonnegative_p@0)
4270 (truncs @0))))
4271
4272 (match double_value_p
4273 @0
4274 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4275 (for froms (BUILT_IN_TRUNCL
4276 BUILT_IN_FLOORL
4277 BUILT_IN_CEILL
4278 BUILT_IN_ROUNDL
4279 BUILT_IN_NEARBYINTL
4280 BUILT_IN_RINTL)
4281 tos (BUILT_IN_TRUNC
4282 BUILT_IN_FLOOR
4283 BUILT_IN_CEIL
4284 BUILT_IN_ROUND
4285 BUILT_IN_NEARBYINT
4286 BUILT_IN_RINT)
4287 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4288 (if (optimize && canonicalize_math_p ())
4289 (simplify
4290 (froms (convert double_value_p@0))
4291 (convert (tos @0)))))
4292
4293 (match float_value_p
4294 @0
4295 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4296 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4297 BUILT_IN_FLOORL BUILT_IN_FLOOR
4298 BUILT_IN_CEILL BUILT_IN_CEIL
4299 BUILT_IN_ROUNDL BUILT_IN_ROUND
4300 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4301 BUILT_IN_RINTL BUILT_IN_RINT)
4302 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4303 BUILT_IN_FLOORF BUILT_IN_FLOORF
4304 BUILT_IN_CEILF BUILT_IN_CEILF
4305 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4306 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4307 BUILT_IN_RINTF BUILT_IN_RINTF)
4308 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
4309 if x is a float. */
4310 (if (optimize && canonicalize_math_p ()
4311 && targetm.libc_has_function (function_c99_misc))
4312 (simplify
4313 (froms (convert float_value_p@0))
4314 (convert (tos @0)))))
4315
4316 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
4317 tos (XFLOOR XCEIL XROUND XRINT)
4318 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4319 (if (optimize && canonicalize_math_p ())
4320 (simplify
4321 (froms (convert double_value_p@0))
4322 (tos @0))))
4323
4324 (for froms (XFLOORL XCEILL XROUNDL XRINTL
4325 XFLOOR XCEIL XROUND XRINT)
4326 tos (XFLOORF XCEILF XROUNDF XRINTF)
4327 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
4328 if x is a float. */
4329 (if (optimize && canonicalize_math_p ())
4330 (simplify
4331 (froms (convert float_value_p@0))
4332 (tos @0))))
4333
4334 (if (canonicalize_math_p ())
4335 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4336 (for floors (IFLOOR LFLOOR LLFLOOR)
4337 (simplify
4338 (floors tree_expr_nonnegative_p@0)
4339 (fix_trunc @0))))
4340
4341 (if (canonicalize_math_p ())
4342 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4343 (for fns (IFLOOR LFLOOR LLFLOOR
4344 ICEIL LCEIL LLCEIL
4345 IROUND LROUND LLROUND)
4346 (simplify
4347 (fns integer_valued_real_p@0)
4348 (fix_trunc @0)))
4349 (if (!flag_errno_math)
4350 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4351 (for rints (IRINT LRINT LLRINT)
4352 (simplify
4353 (rints integer_valued_real_p@0)
4354 (fix_trunc @0)))))
4355
4356 (if (canonicalize_math_p ())
4357 (for ifn (IFLOOR ICEIL IROUND IRINT)
4358 lfn (LFLOOR LCEIL LROUND LRINT)
4359 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4360 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4361 sizeof (int) == sizeof (long). */
4362 (if (TYPE_PRECISION (integer_type_node)
4363 == TYPE_PRECISION (long_integer_type_node))
4364 (simplify
4365 (ifn @0)
4366 (lfn:long_integer_type_node @0)))
4367 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4368 sizeof (long long) == sizeof (long). */
4369 (if (TYPE_PRECISION (long_long_integer_type_node)
4370 == TYPE_PRECISION (long_integer_type_node))
4371 (simplify
4372 (llfn @0)
4373 (lfn:long_integer_type_node @0)))))
4374
4375 /* cproj(x) -> x if we're ignoring infinities. */
4376 (simplify
4377 (CPROJ @0)
4378 (if (!HONOR_INFINITIES (type))
4379 @0))
4380
4381 /* If the real part is inf and the imag part is known to be
4382 nonnegative, return (inf + 0i). */
4383 (simplify
4384 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
4385 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
4386 { build_complex_inf (type, false); }))
4387
4388 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
4389 (simplify
4390 (CPROJ (complex @0 REAL_CST@1))
4391 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
4392 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4393
4394 (for pows (POW)
4395 sqrts (SQRT)
4396 cbrts (CBRT)
4397 (simplify
4398 (pows @0 REAL_CST@1)
4399 (with {
4400 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
4401 REAL_VALUE_TYPE tmp;
4402 }
4403 (switch
4404 /* pow(x,0) -> 1. */
4405 (if (real_equal (value, &dconst0))
4406 { build_real (type, dconst1); })
4407 /* pow(x,1) -> x. */
4408 (if (real_equal (value, &dconst1))
4409 @0)
4410 /* pow(x,-1) -> 1/x. */
4411 (if (real_equal (value, &dconstm1))
4412 (rdiv { build_real (type, dconst1); } @0))
4413 /* pow(x,0.5) -> sqrt(x). */
4414 (if (flag_unsafe_math_optimizations
4415 && canonicalize_math_p ()
4416 && real_equal (value, &dconsthalf))
4417 (sqrts @0))
4418 /* pow(x,1/3) -> cbrt(x). */
4419 (if (flag_unsafe_math_optimizations
4420 && canonicalize_math_p ()
4421 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4422 real_equal (value, &tmp)))
4423 (cbrts @0))))))
4424
4425 /* powi(1,x) -> 1. */
4426 (simplify
4427 (POWI real_onep@0 @1)
4428 @0)
4429
4430 (simplify
4431 (POWI @0 INTEGER_CST@1)
4432 (switch
4433 /* powi(x,0) -> 1. */
4434 (if (wi::to_wide (@1) == 0)
4435 { build_real (type, dconst1); })
4436 /* powi(x,1) -> x. */
4437 (if (wi::to_wide (@1) == 1)
4438 @0)
4439 /* powi(x,-1) -> 1/x. */
4440 (if (wi::to_wide (@1) == -1)
4441 (rdiv { build_real (type, dconst1); } @0))))
4442
4443 /* Narrowing of arithmetic and logical operations.
4444
4445 These are conceptually similar to the transformations performed for
4446 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4447 term we want to move all that code out of the front-ends into here. */
4448
4449 /* If we have a narrowing conversion of an arithmetic operation where
4450 both operands are widening conversions from the same type as the outer
4451    narrowing conversion, then convert the innermost operands to a suitable
4452 unsigned type (to avoid introducing undefined behavior), perform the
4453 operation and convert the result to the desired type. */
4454 (for op (plus minus)
4455 (simplify
4456 (convert (op:s (convert@2 @0) (convert?@3 @1)))
4457 (if (INTEGRAL_TYPE_P (type)
4458 /* We check for type compatibility between @0 and @1 below,
4459 so there's no need to check that @1/@3 are integral types. */
4460 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4461 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4462 /* The precision of the type of each operand must match the
4463 precision of the mode of each operand, similarly for the
4464 result. */
4465 && type_has_mode_precision_p (TREE_TYPE (@0))
4466 && type_has_mode_precision_p (TREE_TYPE (@1))
4467 && type_has_mode_precision_p (type)
4468 /* The inner conversion must be a widening conversion. */
4469 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4470 && types_match (@0, type)
4471 && (types_match (@0, @1)
4472 /* Or the second operand is const integer or converted const
4473 integer from valueize. */
4474 || TREE_CODE (@1) == INTEGER_CST))
4475 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4476 (op @0 (convert @1))
4477 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4478 (convert (op (convert:utype @0)
4479 (convert:utype @1))))))))
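/* E.g. with 16-bit short a, b and 32-bit int (a sketch; short does not
   wrap on overflow, so the narrow arithmetic is done unsigned):
     (short) ((int) a + (int) b)
       ->  (short) ((unsigned short) a + (unsigned short) b).  */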
4480
4481 /* This is another case of narrowing, specifically when there's an outer
4482 BIT_AND_EXPR which masks off bits outside the type of the innermost
4483 operands. Like the previous case we have to convert the operands
4484 to unsigned types to avoid introducing undefined behavior for the
4485 arithmetic operation. */
4486 (for op (minus plus)
4487 (simplify
4488 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4489 (if (INTEGRAL_TYPE_P (type)
4490 /* We check for type compatibility between @0 and @1 below,
4491 so there's no need to check that @1/@3 are integral types. */
4492 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4493 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4494 /* The precision of the type of each operand must match the
4495 precision of the mode of each operand, similarly for the
4496 result. */
4497 && type_has_mode_precision_p (TREE_TYPE (@0))
4498 && type_has_mode_precision_p (TREE_TYPE (@1))
4499 && type_has_mode_precision_p (type)
4500 /* The inner conversion must be a widening conversion. */
4501 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4502 && types_match (@0, @1)
4503 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4504 <= TYPE_PRECISION (TREE_TYPE (@0)))
4505 && (wi::to_wide (@4)
4506 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4507 true, TYPE_PRECISION (type))) == 0)
4508 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4509 (with { tree ntype = TREE_TYPE (@0); }
4510 (convert (bit_and (op @0 @1) (convert:ntype @4))))
4511 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4512 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
4513 (convert:utype @4))))))))
4514
4515 /* Transform (@0 < @1 and @0 < @2) to use min,
4516 (@0 > @1 and @0 > @2) to use max */
4517 (for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
4518 op (lt le gt ge lt le gt ge )
4519 ext (min min max max max max min min )
4520 (simplify
4521 (logic (op:cs @0 @1) (op:cs @0 @2))
4522 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4523 && TREE_CODE (@0) != INTEGER_CST)
4524 (op @0 (ext @1 @2)))))
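/* E.g. for integral, non-constant a (illustrative only):
     a < b && a < c  ->  a < MIN (b, c)
     a > b || a > c  ->  a > MIN (b, c).  */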
4525
4526 (simplify
4527 /* signbit(x) -> 0 if x is nonnegative. */
4528 (SIGNBIT tree_expr_nonnegative_p@0)
4529 { integer_zero_node; })
4530
4531 (simplify
4532 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
4533 (SIGNBIT @0)
4534 (if (!HONOR_SIGNED_ZEROS (@0))
4535 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
4536
4537 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
4538 (for cmp (eq ne)
4539 (for op (plus minus)
4540 rop (minus plus)
4541 (simplify
4542 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4543 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4544 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
4545 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
4546 && !TYPE_SATURATING (TREE_TYPE (@0)))
4547 (with { tree res = int_const_binop (rop, @2, @1); }
4548 (if (TREE_OVERFLOW (res)
4549 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4550 { constant_boolean_node (cmp == NE_EXPR, type); }
4551 (if (single_use (@3))
4552 (cmp @0 { TREE_OVERFLOW (res)
4553 ? drop_tree_overflow (res) : res; }))))))))
4554 (for cmp (lt le gt ge)
4555 (for op (plus minus)
4556 rop (minus plus)
4557 (simplify
4558 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4559 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4560 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4561 (with { tree res = int_const_binop (rop, @2, @1); }
4562 (if (TREE_OVERFLOW (res))
4563 {
4564 fold_overflow_warning (("assuming signed overflow does not occur "
4565 "when simplifying conditional to constant"),
4566 WARN_STRICT_OVERFLOW_CONDITIONAL);
4567 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
4568 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
4569 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
4570 TYPE_SIGN (TREE_TYPE (@1)))
4571 != (op == MINUS_EXPR);
4572 constant_boolean_node (less == ovf_high, type);
4573 }
4574 (if (single_use (@3))
4575 (with
4576 {
4577 fold_overflow_warning (("assuming signed overflow does not occur "
4578 "when changing X +- C1 cmp C2 to "
4579 "X cmp C2 -+ C1"),
4580 WARN_STRICT_OVERFLOW_COMPARISON);
4581 }
4582 (cmp @0 { res; })))))))))
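/* E.g. for int x with undefined signed overflow (illustrative only,
   single-use arithmetic assumed):
     x + 10 == 30  ->  x == 20
     x - 5  <  7   ->  x < 12.  */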
4583
4584 /* Canonicalizations of BIT_FIELD_REFs. */
4585
4586 (simplify
4587 (BIT_FIELD_REF @0 @1 @2)
4588 (switch
4589 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
4590 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4591 (switch
4592 (if (integer_zerop (@2))
4593 (view_convert (realpart @0)))
4594 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4595 (view_convert (imagpart @0)))))
4596 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4597 && INTEGRAL_TYPE_P (type)
4598 /* On GIMPLE this should only apply to register arguments. */
4599 && (! GIMPLE || is_gimple_reg (@0))
4600 /* A bit-field-ref that referenced the full argument can be stripped. */
4601 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
4602 && integer_zerop (@2))
4603 /* Low-parts can be reduced to integral conversions.
4604 ??? The following doesn't work for PDP endian. */
4605 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
4606 /* Don't even think about BITS_BIG_ENDIAN. */
4607 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
4608 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
4609 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
4610 ? (TYPE_PRECISION (TREE_TYPE (@0))
4611 - TYPE_PRECISION (type))
4612 : 0)) == 0)))
4613 (convert @0))))
4614
4615 /* Simplify vector extracts. */
4616
4617 (simplify
4618 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
4619 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
4620 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
4621 || (VECTOR_TYPE_P (type)
4622 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
4623 (with
4624 {
4625 tree ctor = (TREE_CODE (@0) == SSA_NAME
4626 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
4627 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
4628 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
4629 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
4630 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
4631 }
4632 (if (n != 0
4633 && (idx % width) == 0
4634 && (n % width) == 0
4635 && known_le ((idx + n) / width,
4636 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
4637 (with
4638 {
4639 idx = idx / width;
4640 n = n / width;
4641 /* Constructor elements can be subvectors. */
4642 poly_uint64 k = 1;
4643 if (CONSTRUCTOR_NELTS (ctor) != 0)
4644 {
4645 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
4646 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
4647 k = TYPE_VECTOR_SUBPARTS (cons_elem);
4648 }
4649 unsigned HOST_WIDE_INT elt, count, const_k;
4650 }
4651 (switch
4652 /* We keep an exact subset of the constructor elements. */
4653 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
4654 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4655 { build_constructor (type, NULL); }
4656 (if (count == 1)
4657 (if (elt < CONSTRUCTOR_NELTS (ctor))
4658 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
4659 { build_zero_cst (type); })
4660 {
4661 vec<constructor_elt, va_gc> *vals;
4662 vec_alloc (vals, count);
4663 for (unsigned i = 0;
4664 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
4665 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
4666 CONSTRUCTOR_ELT (ctor, elt + i)->value);
4667 build_constructor (type, vals);
4668 })))
4669 /* The bitfield references a single constructor element. */
4670 (if (k.is_constant (&const_k)
4671 && idx + n <= (idx / const_k + 1) * const_k)
4672 (switch
4673 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
4674 { build_zero_cst (type); })
4675 (if (n == const_k)
4676 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
4677 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
4678 @1 { bitsize_int ((idx % const_k) * width); })))))))))
4679
4680 /* Simplify a bit extraction from a bit insertion for the cases where
4681    the inserted element fully covers the extraction or where the
4682    insertion does not touch the extraction. */
4683 (simplify
4684 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
4685 (with
4686 {
4687 unsigned HOST_WIDE_INT isize;
4688 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4689 isize = TYPE_PRECISION (TREE_TYPE (@1));
4690 else
4691 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
4692 }
4693 (switch
4694 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
4695 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
4696 wi::to_wide (@ipos) + isize))
4697 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
4698 wi::to_wide (@rpos)
4699 - wi::to_wide (@ipos)); }))
4700 (if (wi::geu_p (wi::to_wide (@ipos),
4701 wi::to_wide (@rpos) + wi::to_wide (@rsize))
4702 || wi::geu_p (wi::to_wide (@rpos),
4703 wi::to_wide (@ipos) + isize))
4704 (BIT_FIELD_REF @0 @rsize @rpos)))))