/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2017 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   HONOR_NANS)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
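
/* For instance, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) above defines the
   operator lists XFLOORF, XFLOOR and XFLOORL, so that e.g. XFLOOR stands
   for the int/long/long long variants BUILT_IN_IFLOOR, BUILT_IN_LFLOOR
   and BUILT_IN_LLFLOOR of the double rounding function.  */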

/* As opposed to convert?, this still creates a single pattern, so
   it is not a suitable replacement for convert? in all cases.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others.  */
(match (nop_convert @0)
 @0)

/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
 (simplify
  (op @0 integer_zerop)
  (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 (simplify
  (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outp @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outp @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outp @0))))))
 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 (simplify
  (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outn @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outn @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outn @0)))))))

/* Transform X * copysign (1.0, X) into abs(X). */
(simplify
 (mult:c @0 (COPYSIGN real_onep @0))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X). */
(simplify
 (mult:c @0 (COPYSIGN real_onep (negate @0)))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
(simplify
 (COPYSIGN REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN (negate @0) @1)))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (op @0 integer_onep)
  (non_lvalue @0)))

/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / (1 << B)) != -1 >> B.  */
(simplify
 (trunc_div @0 (lshift integer_onep@1 @2))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
  (rshift @0 @2)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
         { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    bool overflow_p;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                            TYPE_SIGN (type), &overflow_p);
   }
   (if (!overflow_p)
    (div @0 { wide_int_to_tree (type, mul); })
    (if (TYPE_UNSIGNED (type)
         || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
     { build_zero_cst (type); })))))
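
/* For example, (X / 3) / 5 folds to X / 15.  If the product of the two
   divisors overflows the type -- say (X / 7) / 41 for unsigned char,
   where 7 * 41 = 287 does not fit in 8 bits -- the quotient is
   necessarily zero, since X itself is smaller than the combined
   divisor.  */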

/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   bool overflow_p;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                           TYPE_SIGN (type), &overflow_p);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))
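
/* E.g. (X * 4) * 8 becomes X * 32; with wrapping overflow the two
   constants simply combine modulo 2^precision.  */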

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
 (if (SCALAR_FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  (switch
   (if (types_match (type, float_type_node))
    (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
   (if (types_match (type, double_type_node))
    (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
   (if (types_match (type, long_double_type_node))
    (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C).  */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
           { build_int_cst (integer_type_node,
                            wi::exact_log2 (wi::to_wide (@2))); }))))

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
                             TYPE_SIGN (type)))
   { build_zero_cst (type); })))

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (wi::to_wide (@1))
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
         Y might be -1, because we would then change valid
         INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
          || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
                                                   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
        || tree_expr_nonnegative_p (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@3))
       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
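
/* For example, for unsigned X this turns X % 8 into X & 7, and
   X % (4 << N) into X & ((4 << N) - 1).  */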

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
                (type, wi::mask (TYPE_PRECISION (type)
                                 - wi::exact_log2 (wi::to_wide (@1)),
                                 false, TYPE_PRECISION (type))); })))
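
/* E.g. for a 32-bit unsigned t, (t * 8) / 8 keeps only the bits that
   survive the wrap-and-shift pair, i.e. t & 0x1FFFFFFF.  */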

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
 (if (! FLOAT_TYPE_P (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x). */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
      && single_use (@1))
  (if (TYPE_UNSIGNED (type))
   (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
 (if (flag_associative_math
      && single_use (@3))
  (with
   { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
   (if (tem)
    (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_and @0 (convert @1)) @2))))
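
/* E.g. for same-precision integers x and y, the test
   "x == 0 && y == 0" needs just one comparison once rewritten as
   "(x | (typeof(x)) y) == 0".  */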

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
 (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
 (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
  (bit_xor @0 @1))
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* (a & ~b) | (a ^ b)  -->  a ^ b  */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a  -->  ~(a & b)  */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (a | b) & ~(a ^ b)  -->  a & b  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b)  -->  a | ~b  */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b)  -->  a | b  */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b)  -->  a | ~b  */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
   (bit_ior (convert @4) (convert @5)))))
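
/* For instance, (x & 1) + (y & 4) can be rewritten as
   (x & 1) | (y & 4) because the masked bit sets are disjoint, and the
   bit_ior form often enables further folding.  */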

/* (X | Y) ^ X -> Y & ~X.  */
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:c @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))

(simplify
 (abs (abs@1 @0))
 @1)
(simplify
 (abs (negate @0))
 (abs @0))
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
 (minus @0 @1)
 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))))

/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* -(A - B) -> B - A.  */
(simplify
 (negate (minus @0 @1))
 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))
  (minus @1 @0)))
(simplify
 (negate (pointer_diff @0 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (type))
  (pointer_diff @1 @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
  (plus @0 (negate @1))))

/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constants (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || !type_has_mode_precision_p (type)))
   (convert (bitop @0 (convert @1))))))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (bitop @0 (bitop @1 @2))))
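
/* E.g. (x & 0xF0) & 0x3C folds to x & 0x30, and (x ^ 1) ^ 3 folds
   to x ^ 2.  */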

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
 (truth_not @0))

(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true.  */
(for op (eq ne)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))

/* ~~x -> x */
(simplify
 (bit_not (bit_not @0))
 @0)

/* Convert ~ (-A) to A - 1.  */
(simplify
 (bit_not (convert? (negate @0)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))

/* Convert - (~A) to A + 1.  */
(simplify
 (negate (nop_convert (bit_not @0)))
 (plus (view_convert @0) { build_each_one_cst (type); }))

/* Convert ~ (A - 1) or ~ (A + -1) to -A.  */
(simplify
 (bit_not (convert? (minus @0 integer_each_onep)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))
(simplify
 (bit_not (convert? (plus @0 integer_all_onesp)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))

/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify.  */
(simplify
 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 (bit_not @1)))))
(simplify
 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 @1))))

/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical.  */
(simplify
 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_not (bit_xor (view_convert @0) @1))))

/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))

/* Fold A - (A & B) into ~B & A.  */
(simplify
 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (convert (bit_and (bit_not @1) @0))))

/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0  */
(for cmp (gt lt ge le)
 (simplify
  (mult (convert (cmp @0 @1)) @2)
  (cond (cmp @0 @1) @2 { build_zero_cst (type); })))

/* For integral types with undefined overflow and C != 0 fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && tree_expr_nonzero_p (@1))
   (cmp @0 @2))))

/* For integral types with wrapping overflow and C odd fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && (TREE_INT_CST_LOW (@1) & 1) != 0)
   (cmp @0 @2))))

/* For integral types with undefined overflow and C != 0 fold
   x * C RELOP y * C into:

   x RELOP y for nonnegative C
   y RELOP x for negative C  */
(for cmp (lt gt le ge)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
    (cmp @0 @2)
    (if (TREE_CODE (@1) == INTEGER_CST
         && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
     (cmp @2 @0))))))

/* (X - 1U) <= INT_MAX-1U into (int) X > 0.  */
(for cmp (le gt)
     icmp (gt le)
 (simplify
  (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_PRECISION (TREE_TYPE (@0)) > 1
       && (wi::to_wide (@2)
           == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
   (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
    (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
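
/* E.g. for 32-bit unsigned X, "X - 1 <= 0x7ffffffe" holds exactly when
   X is in [1, 0x7fffffff], i.e. when (int) X > 0.  */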

/* X / 4 < Y / 4 iff X < Y when the division is known to be exact.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
  (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
   (cmp @0 @1))))

/* X / C1 op C2 into a simple range test.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && integer_nonzerop (@1)
       && !TREE_OVERFLOW (@1)
       && !TREE_OVERFLOW (@2))
   (with { tree lo, hi; bool neg_overflow;
           enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
                                                   &neg_overflow); }
    (switch
     (if (code == LT_EXPR || code == GE_EXPR)
      (if (TREE_OVERFLOW (lo))
       { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
       (if (code == LT_EXPR)
        (lt @0 { lo; })
        (ge @0 { lo; }))))
     (if (code == LE_EXPR || code == GT_EXPR)
      (if (TREE_OVERFLOW (hi))
       { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
       (if (code == LE_EXPR)
        (le @0 { hi; })
        (gt @0 { hi; }))))
     (if (!lo && !hi)
      { build_int_cst (type, code == NE_EXPR); })
     (if (code == EQ_EXPR && !hi)
      (ge @0 { lo; }))
     (if (code == EQ_EXPR && !lo)
      (le @0 { hi; }))
     (if (code == NE_EXPR && !hi)
      (lt @0 { lo; }))
     (if (code == NE_EXPR && !lo)
      (gt @0 { hi; }))
     (if (GENERIC)
      { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
                           lo, hi); })
     (with
      {
        tree etype = range_check_type (TREE_TYPE (@0));
        if (etype)
          {
            if (! TYPE_UNSIGNED (etype))
              etype = unsigned_type_for (etype);
            hi = fold_convert (etype, hi);
            lo = fold_convert (etype, lo);
            hi = const_binop (MINUS_EXPR, etype, hi, lo);
          }
      }
      (if (etype && hi && !TREE_OVERFLOW (hi))
       (if (code == EQ_EXPR)
        (le (minus (convert:etype @0) { lo; }) { hi; })
        (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
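
/* For example, X / 4 == 3 becomes the range test 12 <= X && X <= 15,
   which is emitted as the single unsigned comparison (X - 12) <= 3.  */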

/* X + Z < Y + Z is the same as X < Y when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (plus:c @0 @2) (plus:c @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (plus:c @0 @2) (plus:c @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @0 @1))))

/* X - Z < Y - Z is the same as X < Y when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (minus @0 @2) (minus @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (minus @0 @2) (minus @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @0 @1))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @0 @1))))
(simplify
 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @0 @1)))

/* Z - X < Z - Y is the same as Y < X when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @1 @0))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @1 @0))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @1 @0))))
(simplify
 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @1 @0)))

/* X + Y < Y is the same as X < 0 when there is no overflow.  */
(for op (lt le gt ge)
 (simplify
  (op:c (plus:c@2 @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@0) || single_use (@2)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
/* For equality, this is also true with wrapping overflow.  */
(for op (eq ne)
 (simplify
  (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
       && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
 (simplify
  (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
  (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))

/* X - Y < X is the same as Y > 0 when there is no overflow.
   For equality, this is also true with wrapping overflow.  */
(for op (simple_comparison)
 (simplify
  (op:c @0 (minus@2 @0 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || ((op == EQ_EXPR || op == NE_EXPR)
               && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
       && (CONSTANT_CLASS_P (@1) || single_use (@2)))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))

/* Transform:
 * (X / Y) == 0 -> X < Y if X, Y are unsigned.
 * (X / Y) != 0 -> X >= Y, if X, Y are unsigned.
 */
(for cmp (eq ne)
     ocmp (lt ge)
 (simplify
  (cmp (trunc_div @0 @1) integer_zerop)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
   (ocmp @0 @1))))

/* X == C - X can never be true if C is odd.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
  (if (TREE_INT_CST_LOW (@1) & 1)
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* Arguments on which one can call get_nonzero_bits to get the bits
   possibly set.  */
(match with_possible_nonzero_bits
 INTEGER_CST@0)
(match with_possible_nonzero_bits
 SSA_NAME@0
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
/* Slightly extended version, do not make it recursive to keep it cheap.  */
(match (with_possible_nonzero_bits2 @0)
 with_possible_nonzero_bits@0)
(match (with_possible_nonzero_bits2 @0)
 (bit_and:c with_possible_nonzero_bits@0 @2))

/* Same for bits that are known to be set, but we do not have
   an equivalent to get_nonzero_bits yet.  */
(match (with_certain_nonzero_bits2 @0)
 INTEGER_CST@0)
(match (with_certain_nonzero_bits2 @0)
 (bit_ior @1 INTEGER_CST@0))

/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
  (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* ((X inner_op C0) outer_op C1)
   With X being a tree where value_range has reasoned certain bits to always be
   zero throughout its computed value range,
   inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op,
   and zero_mask having 1's for all bits that are sure to be 0 in X
   and 0's otherwise:
   if (inner_op == '^') C0 &= ~C1;
   if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
   if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
*/
(for inner_op (bit_ior bit_xor)
     outer_op (bit_xor bit_ior)
 (simplify
  (outer_op
   (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
  (with
   {
     bool fail = false;
     wide_int zero_mask_not;
     wide_int C0;
     wide_int cst_emit;

     if (TREE_CODE (@2) == SSA_NAME)
       zero_mask_not = get_nonzero_bits (@2);
     else
       fail = true;

     if (inner_op == BIT_XOR_EXPR)
       {
         C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
         cst_emit = C0 | wi::to_wide (@1);
       }
     else
       {
         C0 = wi::to_wide (@0);
         cst_emit = C0 ^ wi::to_wide (@1);
       }
   }
   (if (!fail && (C0 & zero_mask_not) == 0)
    (outer_op @2 { wide_int_to_tree (type, cst_emit); })
    (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
     (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
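
/* For example, if value ranges show that X only ever has bits set in
   0x0f, then (X | 0x10) ^ 0x20 can be emitted as the single operation
   X ^ 0x30, since neither constant can interfere with X's live bits.  */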

/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
(simplify
 (pointer_plus (pointer_plus:s @0 @1) @3)
 (pointer_plus @0 (plus @1 @3)))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
 /* Conditionally look through a sign-changing conversion.  */
 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
      && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
          || (GENERIC && type == TREE_TYPE (@1))))
  @1))
(simplify
 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
  (convert @1)))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler form, which is also easier to analyze with
   respect to alignment:
     ... = ptr & ~algn;  */
(simplify
 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
  (bit_and @0 { algn; })))
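
/* E.g. with algn == 15 this turns "ptr p+ -((sizetype) ptr & 15)" into
   "ptr & ~15", i.e. ptr rounded down to a 16-byte boundary.  */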

/* Try folding difference of addresses.  */
(simplify
 (minus (convert ADDR_EXPR@0) (convert @1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (minus (convert @0) (convert ADDR_EXPR@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert?@3 @1))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 @0) (convert?@3 ADDR_EXPR@1))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))

/* If arg0 is derived from the address of an object or function, we may
   be able to fold this expression using the object or function's
   alignment.  */
(simplify
 (bit_and (convert? @0) INTEGER_CST@1)
 (if (POINTER_TYPE_P (TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with
   {
     unsigned int align;
     unsigned HOST_WIDE_INT bitpos;
     get_pointer_alignment_1 (@0, &align, &bitpos);
   }
   (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
    { wide_int_to_tree (type, (wi::to_wide (@1)
                               & (bitpos / BITS_PER_UNIT))); }))))


/* We can't reassociate at all for saturating types.  */
(if (!TYPE_SATURATING (type))

 /* Contract negates.  */
 /* A + (-B) -> A - B */
 (simplify
  (plus:c @0 (convert? (negate @1)))
  /* Apply STRIP_NOPS on the negate.  */
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (with
    {
     tree t1 = type;
     if (INTEGRAL_TYPE_P (type)
         && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
       t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
    }
    (convert (minus (convert:t1 @0) (convert:t1 @1))))))
 /* A - (-B) -> A + B */
 (simplify
  (minus @0 (convert? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (with
    {
     tree t1 = type;
     if (INTEGRAL_TYPE_P (type)
         && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
       t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
    }
    (convert (plus (convert:t1 @0) (convert:t1 @1))))))
 /* -(T)(-A) -> (T)A
    Sign-extension is ok except for INT_MIN, which thankfully cannot
    happen without overflow.  */
 (simplify
  (negate (convert (negate @1)))
  (if (INTEGRAL_TYPE_P (type)
       && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
           || (!TYPE_UNSIGNED (TREE_TYPE (@1))
               && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
       && !TYPE_OVERFLOW_SANITIZED (type)
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
   (convert @1)))
 (simplify
  (negate (convert negate_expr_p@1))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ((DECIMAL_FLOAT_TYPE_P (type)
            == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
            && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
           || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
   (convert (negate @1))))
 (simplify
  (negate (nop_convert (negate @1)))
  (if (!TYPE_OVERFLOW_SANITIZED (type)
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
   (view_convert @1)))
1663 /* We can't reassociate floating-point (unless -fassociative-math)
1664 or fixed-point plus or minus because of saturation to +-Inf. */
1665 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1666 && !FIXED_POINT_TYPE_P (type))
1667
1668 /* Match patterns that allow contracting a plus-minus pair
1669 irrespective of overflow issues. */
1670 /* (A +- B) - A -> +- B */
1671 /* (A +- B) -+ B -> A */
1672 /* A - (A +- B) -> -+ B */
1673 /* A +- (B -+ A) -> +- B */
1674 (simplify
1675 (minus (plus:c @0 @1) @0)
1676 @1)
1677 (simplify
1678 (minus (minus @0 @1) @0)
1679 (negate @1))
1680 (simplify
1681 (plus:c (minus @0 @1) @1)
1682 @0)
1683 (simplify
1684 (minus @0 (plus:c @0 @1))
1685 (negate @1))
1686 (simplify
1687 (minus @0 (minus @0 @1))
1688 @1)
1689 /* (A +- B) + (C - A) -> C +- B */
1690 /* (A + B) - (A - C) -> B + C */
1691 /* More cases are handled with comparisons. */
1692 (simplify
1693 (plus:c (plus:c @0 @1) (minus @2 @0))
1694 (plus @2 @1))
1695 (simplify
1696 (plus:c (minus @0 @1) (minus @2 @0))
1697 (minus @2 @1))
1698 (simplify
1699 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
1700 (if (TYPE_OVERFLOW_UNDEFINED (type)
1701 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
1702 (pointer_diff @2 @1)))
1703 (simplify
1704 (minus (plus:c @0 @1) (minus @0 @2))
1705 (plus @1 @2))
1706
1707 /* (A +- CST1) +- CST2 -> A + CST3
1708 Use view_convert because it is safe for vectors and equivalent for
1709 scalars. */
1710 (for outer_op (plus minus)
1711 (for inner_op (plus minus)
1712 neg_inner_op (minus plus)
1713 (simplify
1714 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1715 CONSTANT_CLASS_P@2)
1716 /* If one of the types wraps, use that one. */
1717 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
1718 (if (outer_op == PLUS_EXPR)
1719 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1720 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1))))
1721 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1722 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1723 (if (outer_op == PLUS_EXPR)
1724 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1725 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1726 /* If the constant operation overflows we cannot do the transform
1727 directly as we would introduce undefined overflow, for example
1728 with (a - 1) + INT_MIN. */
1729 (if (types_match (type, @0))
1730 (with { tree cst = const_binop (outer_op == inner_op
1731 ? PLUS_EXPR : MINUS_EXPR,
1732 type, @1, @2); }
1733 (if (cst && !TREE_OVERFLOW (cst))
1734 (inner_op @0 { cst; } )
1735 /* X+INT_MAX+1 is X-INT_MIN. */
1736 (if (INTEGRAL_TYPE_P (type) && cst
1737 && wi::to_wide (cst) == wi::min_value (type))
1738 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
1739 /* Last resort, use some unsigned type. */
1740 (with { tree utype = unsigned_type_for (type); }
1741 (view_convert (inner_op
1742 (view_convert:utype @0)
1743 (view_convert:utype
1744 { drop_tree_overflow (cst); })))))))))))))
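
/* Editor's sketch of the three tiers above: with matching or wrapping
   types (x + 1) + 2 folds directly to x + 3; for signed x,
   (x + INT_MAX) + 1 takes the min_value branch and becomes
   x - INT_MIN, while (x - 1) + INT_MIN, whose folded constant merely
   overflows, falls back to doing the inner operation in the
   corresponding unsigned type so no undefined overflow is
   introduced.  */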
1745
1746 /* (CST1 - A) +- CST2 -> CST3 - A */
1747 (for outer_op (plus minus)
1748 (simplify
1749 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
1750 (with { tree cst = const_binop (outer_op, type, @1, @2); }
1751 (if (cst && !TREE_OVERFLOW (cst))
1752 (minus { cst; } @0)))))
1753
1754 /* CST1 - (CST2 - A) -> CST3 + A */
1755 (simplify
1756 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1757 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1758 (if (cst && !TREE_OVERFLOW (cst))
1759 (plus { cst; } @0))))
1760
1761 /* ~A + A -> -1 */
1762 (simplify
1763 (plus:c (bit_not @0) @0)
1764 (if (!TYPE_OVERFLOW_TRAPS (type))
1765 { build_all_ones_cst (type); }))
1766
1767 /* ~A + 1 -> -A */
1768 (simplify
1769 (plus (convert? (bit_not @0)) integer_each_onep)
1770 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1771 (negate (convert @0))))
1772
1773 /* -A - 1 -> ~A */
1774 (simplify
1775 (minus (convert? (negate @0)) integer_each_onep)
1776 (if (!TYPE_OVERFLOW_TRAPS (type)
1777 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1778 (bit_not (convert @0))))
1779
1780 /* -1 - A -> ~A */
1781 (simplify
1782 (minus integer_all_onesp @0)
1783 (bit_not @0))
1784
1785 /* (T)(P + A) - (T)P -> (T) A */
1786 (simplify
1787 (minus (convert (plus:c @@0 @1))
1788 (convert? @0))
1789 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1790 /* For integer types, if A has a smaller type
1791 than T the result depends on the possible
1792 overflow in P + A.
1793 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1794 However, if an overflow in P + A would cause
1795 undefined behavior, we can assume that there
1796 is no overflow. */
1797 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1798 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1799 (convert @1)))
1800 (simplify
1801 (minus (convert (pointer_plus @@0 @1))
1802 (convert @0))
1803 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1804 /* For pointer types, if the conversion of A to the
1805 final type requires a sign- or zero-extension,
1806 then we have to punt - it is not defined which
1807 one is correct. */
1808 || (POINTER_TYPE_P (TREE_TYPE (@0))
1809 && TREE_CODE (@1) == INTEGER_CST
1810 && tree_int_cst_sign_bit (@1) == 0))
1811 (convert @1)))
1812 (simplify
1813 (pointer_diff (pointer_plus @@0 @1) @0)
1814 /* The second argument of pointer_plus must be interpreted as signed, and
1815 thus sign-extended if necessary. */
1816 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
1817 (convert (convert:stype @1))))
1818
1819 /* (T)P - (T)(P + A) -> -(T) A */
1820 (simplify
1821 (minus (convert? @0)
1822 (convert (plus:c @@0 @1)))
1823 (if (INTEGRAL_TYPE_P (type)
1824 && TYPE_OVERFLOW_UNDEFINED (type)
1825 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1826 (with { tree utype = unsigned_type_for (type); }
1827 (convert (negate (convert:utype @1))))
1828 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1829 /* For integer types, if A has a smaller type
1830 than T the result depends on the possible
1831 overflow in P + A.
1832 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1833 However, if an overflow in P + A would cause
1834 undefined behavior, we can assume that there
1835 is no overflow. */
1836 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1837 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1838 (negate (convert @1)))))
1839 (simplify
1840 (minus (convert @0)
1841 (convert (pointer_plus @@0 @1)))
1842 (if (INTEGRAL_TYPE_P (type)
1843 && TYPE_OVERFLOW_UNDEFINED (type)
1844 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1845 (with { tree utype = unsigned_type_for (type); }
1846 (convert (negate (convert:utype @1))))
1847 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1848 /* For pointer types, if the conversion of A to the
1849 final type requires a sign- or zero-extension,
1850 then we have to punt - it is not defined which
1851 one is correct. */
1852 || (POINTER_TYPE_P (TREE_TYPE (@0))
1853 && TREE_CODE (@1) == INTEGER_CST
1854 && tree_int_cst_sign_bit (@1) == 0))
1855 (negate (convert @1)))))
1856 (simplify
1857 (pointer_diff @0 (pointer_plus @@0 @1))
1858 /* The second argument of pointer_plus must be interpreted as signed, and
1859 thus sign-extended if necessary. */
1860 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
1861 (negate (convert (convert:stype @1)))))
1862
1863 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
1864 (simplify
1865 (minus (convert (plus:c @@0 @1))
1866 (convert (plus:c @0 @2)))
1867 (if (INTEGRAL_TYPE_P (type)
1868 && TYPE_OVERFLOW_UNDEFINED (type)
1869 && element_precision (type) <= element_precision (TREE_TYPE (@1))
1870 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
1871 (with { tree utype = unsigned_type_for (type); }
1872 (convert (minus (convert:utype @1) (convert:utype @2))))
1873 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
1874 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
1875 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
1876 /* For integer types, if A has a smaller type
1877 than T the result depends on the possible
1878 overflow in P + A.
1879 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1880 However, if an overflow in P + A would cause
1881 undefined behavior, we can assume that there
1882 is no overflow. */
1883 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1884 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
1885 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
1886 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
1887 (minus (convert @1) (convert @2)))))
1888 (simplify
1889 (minus (convert (pointer_plus @@0 @1))
1890 (convert (pointer_plus @0 @2)))
1891 (if (INTEGRAL_TYPE_P (type)
1892 && TYPE_OVERFLOW_UNDEFINED (type)
1893 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1894 (with { tree utype = unsigned_type_for (type); }
1895 (convert (minus (convert:utype @1) (convert:utype @2))))
1896 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1897 /* For pointer types, if the conversion of A to the
1898 final type requires a sign- or zero-extension,
1899 then we have to punt - it is not defined which
1900 one is correct. */
1901 || (POINTER_TYPE_P (TREE_TYPE (@0))
1902 && TREE_CODE (@1) == INTEGER_CST
1903 && tree_int_cst_sign_bit (@1) == 0
1904 && TREE_CODE (@2) == INTEGER_CST
1905 && tree_int_cst_sign_bit (@2) == 0))
1906 (minus (convert @1) (convert @2)))))
1907 (simplify
1908 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
1909 /* The second argument of pointer_plus must be interpreted as signed, and
1910 thus sign-extended if necessary. */
1911 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
1912 (minus (convert (convert:stype @1)) (convert (convert:stype @2)))))))
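
/* Editor's sketch: for a char * (element size 1) and a signed offset,

     long f (char *p, long n) { return (p + n) - p; }

   matches the pointer_diff patterns above and folds to n; the
   conversion through the signed variant of the offset type is what
   keeps sign extension of narrower offsets correct.  */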
1913
1914
1915 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
1916
1917 (for minmax (min max FMIN FMIN_FN FMAX FMAX_FN)
1918 (simplify
1919 (minmax @0 @0)
1920 @0))
1921 /* min(max(x,y),y) -> y. */
1922 (simplify
1923 (min:c (max:c @0 @1) @1)
1924 @1)
1925 /* max(min(x,y),y) -> y. */
1926 (simplify
1927 (max:c (min:c @0 @1) @1)
1928 @1)
1929 /* max(a,-a) -> abs(a). */
1930 (simplify
1931 (max:c @0 (negate @0))
1932 (if (TREE_CODE (type) != COMPLEX_TYPE
1933 && (! ANY_INTEGRAL_TYPE_P (type)
1934 || TYPE_OVERFLOW_UNDEFINED (type)))
1935 (abs @0)))
1936 /* min(a,-a) -> -abs(a). */
1937 (simplify
1938 (min:c @0 (negate @0))
1939 (if (TREE_CODE (type) != COMPLEX_TYPE
1940 && (! ANY_INTEGRAL_TYPE_P (type)
1941 || TYPE_OVERFLOW_UNDEFINED (type)))
1942 (negate (abs @0))))
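
/* E.g. (editor's sketch) once phiopt has formed the MAX_EXPR,

     int f (int a) { return a > -a ? a : -a; }

   folds to ABS_EXPR <a>; for wrapping types the transform is skipped
   since -a overflows for a == INT_MIN and the "abs" would come out
   negative.  */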
1943 (simplify
1944 (min @0 @1)
1945 (switch
1946 (if (INTEGRAL_TYPE_P (type)
1947 && TYPE_MIN_VALUE (type)
1948 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1949 @1)
1950 (if (INTEGRAL_TYPE_P (type)
1951 && TYPE_MAX_VALUE (type)
1952 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1953 @0)))
1954 (simplify
1955 (max @0 @1)
1956 (switch
1957 (if (INTEGRAL_TYPE_P (type)
1958 && TYPE_MAX_VALUE (type)
1959 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1960 @1)
1961 (if (INTEGRAL_TYPE_P (type)
1962 && TYPE_MIN_VALUE (type)
1963 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1964 @0)))
1965
1966 /* max (a, a + CST) -> a + CST where CST is positive. */
1967 /* max (a, a + CST) -> a where CST is negative. */
1968 (simplify
1969 (max:c @0 (plus@2 @0 INTEGER_CST@1))
1970 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1971 (if (tree_int_cst_sgn (@1) > 0)
1972 @2
1973 @0)))
1974
1975 /* min (a, a + CST) -> a where CST is positive. */
1976 /* min (a, a + CST) -> a + CST where CST is negative. */
1977 (simplify
1978 (min:c @0 (plus@2 @0 INTEGER_CST@1))
1979 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1980 (if (tree_int_cst_sgn (@1) > 0)
1981 @0
1982 @2)))
1983
1984 /* (convert (minmax (convert x) c)) -> (minmax x c) if x is promoted
1985 and the outer convert demotes the expression back to x's type. */
1986 (for minmax (min max)
1987 (simplify
1988 (convert (minmax@0 (convert @1) INTEGER_CST@2))
1989 (if (INTEGRAL_TYPE_P (type)
1990 && types_match (@1, type) && int_fits_type_p (@2, type)
1991 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
1992 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
1993 (minmax @1 (convert @2)))))
1994
1995 (for minmax (FMIN FMIN_FN FMAX FMAX_FN)
1996 /* If either argument is NaN, return the other one. Avoid the
1997 transformation if we get (and honor) a signalling NaN. */
1998 (simplify
1999 (minmax:c @0 REAL_CST@1)
2000 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2001 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2002 @0)))
2003 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2004 functions to return the numeric arg if the other one is NaN.
2005 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2006 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2007 worry about it either. */
2008 (if (flag_finite_math_only)
2009 (simplify
2010 (FMIN @0 @1)
2011 (min @0 @1))
2012 (simplify
2013 (FMIN_FN @0 @1)
2014 (min @0 @1))
2015 (simplify
2016 (FMAX @0 @1)
2017 (max @0 @1))
2018 (simplify
2019 (FMAX_FN @0 @1)
2020 (max @0 @1)))
2021 /* min (-A, -B) -> -max (A, B) */
2022 (for minmax (min max FMIN FMIN_FN FMAX FMAX_FN)
2023 maxmin (max min FMAX FMAX_FN FMIN FMIN_FN)
2024 (simplify
2025 (minmax (negate:s@2 @0) (negate:s@3 @1))
2026 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2027 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2028 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2029 (negate (maxmin @0 @1)))))
2030 /* MIN (~X, ~Y) -> ~MAX (X, Y)
2031 MAX (~X, ~Y) -> ~MIN (X, Y) */
2032 (for minmax (min max)
2033 maxmin (max min)
2034 (simplify
2035 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2036 (bit_not (maxmin @0 @1))))
2037
2038 /* MIN (X, Y) == X -> X <= Y */
2039 (for minmax (min min max max)
2040 cmp (eq ne eq ne )
2041 out (le gt ge lt )
2042 (simplify
2043 (cmp:c (minmax:c @0 @1) @0)
2044 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2045 (out @0 @1))))
2046 /* MIN (X, 5) == 0 -> X == 0
2047 MIN (X, 5) == 7 -> false */
2048 (for cmp (eq ne)
2049 (simplify
2050 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2051 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2052 TYPE_SIGN (TREE_TYPE (@0))))
2053 { constant_boolean_node (cmp == NE_EXPR, type); }
2054 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2055 TYPE_SIGN (TREE_TYPE (@0))))
2056 (cmp @0 @2)))))
2057 (for cmp (eq ne)
2058 (simplify
2059 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2060 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2061 TYPE_SIGN (TREE_TYPE (@0))))
2062 { constant_boolean_node (cmp == NE_EXPR, type); }
2063 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2064 TYPE_SIGN (TREE_TYPE (@0))))
2065 (cmp @0 @2)))))
2066 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2067 (for minmax (min min max max min min max max )
2068 cmp (lt le gt ge gt ge lt le )
2069 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2070 (simplify
2071 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2072 (comb (cmp @0 @2) (cmp @1 @2))))
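
/* E.g. (editor's sketch) MIN (x, 9) < 7 becomes x < 7 || 9 < 7,
   i.e. x < 7, while MAX (x, 9) < 7 becomes x < 7 && 9 < 7, i.e.
   false; the comparison against the constant operand then folds
   away.  */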
2073
2074 /* Simplifications of shift and rotates. */
2075
2076 (for rotate (lrotate rrotate)
2077 (simplify
2078 (rotate integer_all_onesp@0 @1)
2079 @0))
2080
2081 /* Optimize -1 >> x for arithmetic right shifts. */
2082 (simplify
2083 (rshift integer_all_onesp@0 @1)
2084 (if (!TYPE_UNSIGNED (type)
2085 && tree_expr_nonnegative_p (@1))
2086 @0))
2087
2088 /* Optimize (x >> c) << c into x & (-1<<c). */
2089 (simplify
2090 (lshift (rshift @0 INTEGER_CST@1) @1)
2091 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2092 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
2093
2094 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2095 types. */
2096 (simplify
2097 (rshift (lshift @0 INTEGER_CST@1) @1)
2098 (if (TYPE_UNSIGNED (type)
2099 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2100 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
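
/* Editor's examples, for a 32-bit unsigned x:

     unsigned f (unsigned x) { return (x >> 4) << 4; }
     unsigned g (unsigned x) { return (x << 4) >> 4; }

   f folds to x & 0xfffffff0 and g to x & 0x0fffffff.  */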
2101
2102 (for shiftrotate (lrotate rrotate lshift rshift)
2103 (simplify
2104 (shiftrotate @0 integer_zerop)
2105 (non_lvalue @0))
2106 (simplify
2107 (shiftrotate integer_zerop@0 @1)
2108 @0)
2109 /* Prefer vector1 << scalar to vector1 << vector2
2110 if vector2 is uniform. */
2111 (for vec (VECTOR_CST CONSTRUCTOR)
2112 (simplify
2113 (shiftrotate @0 vec@1)
2114 (with { tree tem = uniform_vector_p (@1); }
2115 (if (tem)
2116 (shiftrotate @0 { tem; }))))))
2117
2118 /* Simplify X << Y to X when the low "width" bits of Y are all zero,
2119 since the only such Y for which the shift is defined is 0. Similarly for X >> Y. */
2120 #if GIMPLE
2121 (for shift (lshift rshift)
2122 (simplify
2123 (shift @0 SSA_NAME@1)
2124 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2125 (with {
2126 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2127 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2128 }
2129 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2130 @0)))))
2131 #endif
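
/* Hypothetical GIMPLE-only example: for 32-bit x, width is
   ceil_log2 (32) == 5, so once the low five bits of the shift count
   are known zero, as in

     unsigned f (unsigned x, unsigned n) { return x << (n & ~31u); }

   the count must be 0 (any larger multiple of 32 would be undefined)
   and the shift folds to x.  */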
2132
2133 /* Rewrite an LROTATE_EXPR by a constant into an
2134 RROTATE_EXPR by a new constant. */
2135 (simplify
2136 (lrotate @0 INTEGER_CST@1)
2137 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2138 build_int_cst (TREE_TYPE (@1),
2139 element_precision (type)), @1); }))
2140
2141 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2142 (for op (lrotate rrotate rshift lshift)
2143 (simplify
2144 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2145 (with { unsigned int prec = element_precision (type); }
2146 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2147 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2148 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2149 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2150 (with { unsigned int low = (tree_to_uhwi (@1)
2151 + tree_to_uhwi (@2)); }
2152 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2153 being well defined. */
2154 (if (low >= prec)
2155 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2156 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2157 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2158 { build_zero_cst (type); }
2159 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2160 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
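
/* Editor's examples, on a 32-bit type: (x << 3) << 5 becomes x << 8;
   (x rol 20) rol 20 becomes x rol 8, since rotate counts combine
   modulo the precision; (x << 20) << 20 folds to 0; and for signed x,
   (x >> 20) >> 20 becomes x >> 31, splatting the sign bit.  */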
2161
2162
2163 /* ((1 << A) & 1) != 0 -> A == 0
2164 ((1 << A) & 1) == 0 -> A != 0 */
2165 (for cmp (ne eq)
2166 icmp (eq ne)
2167 (simplify
2168 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2169 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2170
2171 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2172 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2173 if CST2 != 0. */
2174 (for cmp (ne eq)
2175 (simplify
2176 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2177 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2178 (if (cand < 0
2179 || (!integer_zerop (@2)
2180 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2181 { constant_boolean_node (cmp == NE_EXPR, type); }
2182 (if (!integer_zerop (@2)
2183 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2184 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
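
/* E.g. (4 << a) == 32 folds to a == 3, as ctz (32) - ctz (4) == 3 and
   4 << 3 == 32, while (4 << a) == 33 folds to false because no shift
   of 4 can produce 33.  */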
2185
2186 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2187 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2188 if the new mask might be further optimized. */
2189 (for shift (lshift rshift)
2190 (simplify
2191 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2192 INTEGER_CST@2)
2193 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2194 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2195 && tree_fits_uhwi_p (@1)
2196 && tree_to_uhwi (@1) > 0
2197 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2198 (with
2199 {
2200 unsigned int shiftc = tree_to_uhwi (@1);
2201 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2202 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2203 tree shift_type = TREE_TYPE (@3);
2204 unsigned int prec;
2205
2206 if (shift == LSHIFT_EXPR)
2207 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2208 else if (shift == RSHIFT_EXPR
2209 && type_has_mode_precision_p (shift_type))
2210 {
2211 prec = TYPE_PRECISION (TREE_TYPE (@3));
2212 tree arg00 = @0;
2213 /* See if more bits can be proven as zero because of
2214 zero extension. */
2215 if (@3 != @0
2216 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2217 {
2218 tree inner_type = TREE_TYPE (@0);
2219 if (type_has_mode_precision_p (inner_type)
2220 && TYPE_PRECISION (inner_type) < prec)
2221 {
2222 prec = TYPE_PRECISION (inner_type);
2223 /* See if we can shorten the right shift. */
2224 if (shiftc < prec)
2225 shift_type = inner_type;
2226 /* Otherwise X >> C1 is all zeros, so we'll optimize
2227 it into (X, 0) later on by making sure zerobits
2228 is all ones. */
2229 }
2230 }
2231 zerobits = HOST_WIDE_INT_M1U;
2232 if (shiftc < prec)
2233 {
2234 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2235 zerobits <<= prec - shiftc;
2236 }
2237 /* For arithmetic shift if sign bit could be set, zerobits
2238 can contain actually sign bits, so no transformation is
2239 possible, unless MASK masks them all away. In that
2240 case the shift needs to be converted into logical shift. */
2241 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2242 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2243 {
2244 if ((mask & zerobits) == 0)
2245 shift_type = unsigned_type_for (TREE_TYPE (@3));
2246 else
2247 zerobits = 0;
2248 }
2249 }
2250 }
2251 /* ((X << 16) & 0xff00) is (X, 0). */
2252 (if ((mask & zerobits) == mask)
2253 { build_int_cst (type, 0); }
2254 (with { newmask = mask | zerobits; }
2255 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2256 (with
2257 {
2258 /* Only do the transformation if NEWMASK is some integer
2259 mode's mask. */
2260 for (prec = BITS_PER_UNIT;
2261 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
2262 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
2263 break;
2264 }
2265 (if (prec < HOST_BITS_PER_WIDE_INT
2266 || newmask == HOST_WIDE_INT_M1U)
2267 (with
2268 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2269 (if (!tree_int_cst_equal (newmaskt, @2))
2270 (if (shift_type != TREE_TYPE (@3))
2271 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2272 (bit_and @4 { newmaskt; })))))))))))))
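
/* Editor's examples, for 32-bit unsigned x: ((x << 16) & 0xff00)
   folds to 0 because every bit selected by the mask is known zero,
   while ((x >> 24) & 0x1ff) widens its mask to the full-width
   all-ones constant, after which the bitwise AND can be dropped
   entirely.  */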
2273
2274 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2275 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
2276 (for shift (lshift rshift)
2277 (for bit_op (bit_and bit_xor bit_ior)
2278 (simplify
2279 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2280 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2281 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2282 (bit_op (shift (convert @0) @1) { mask; }))))))
2283
2284 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2285 (simplify
2286 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2287 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2288 && (element_precision (TREE_TYPE (@0))
2289 <= element_precision (TREE_TYPE (@1))
2290 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2291 (with
2292 { tree shift_type = TREE_TYPE (@0); }
2293 (convert (rshift (convert:shift_type @1) @2)))))
2294
2295 /* ~(~X >>r Y) -> X >>r Y
2296 ~(~X <<r Y) -> X <<r Y */
2297 (for rotate (lrotate rrotate)
2298 (simplify
2299 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2300 (if ((element_precision (TREE_TYPE (@0))
2301 <= element_precision (TREE_TYPE (@1))
2302 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2303 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2304 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2305 (with
2306 { tree rotate_type = TREE_TYPE (@0); }
2307 (convert (rotate (convert:rotate_type @1) @2))))))
2308
2309 /* Simplifications of conversions. */
2310
2311 /* Basic strip-useless-type-conversions / strip_nops. */
2312 (for cvt (convert view_convert float fix_trunc)
2313 (simplify
2314 (cvt @0)
2315 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2316 || (GENERIC && type == TREE_TYPE (@0)))
2317 @0)))
2318
2319 /* Contract view-conversions. */
2320 (simplify
2321 (view_convert (view_convert @0))
2322 (view_convert @0))
2323
2324 /* For integral conversions with the same precision or pointer
2325 conversions use a NOP_EXPR instead. */
2326 (simplify
2327 (view_convert @0)
2328 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2329 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2330 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2331 (convert @0)))
2332
2333 /* Strip inner integral conversions that do not change precision or size, or
2334 zero-extend while keeping the same size (for bool-to-char). */
2335 (simplify
2336 (view_convert (convert@0 @1))
2337 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2338 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2339 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2340 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2341 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2342 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
2343 (view_convert @1)))
2344
2345 /* Re-association barriers around constants and other re-association
2346 barriers can be removed. */
2347 (simplify
2348 (paren CONSTANT_CLASS_P@0)
2349 @0)
2350 (simplify
2351 (paren (paren@1 @0))
2352 @1)
2353
2354 /* Handle cases of two conversions in a row. */
2355 (for ocvt (convert float fix_trunc)
2356 (for icvt (convert float)
2357 (simplify
2358 (ocvt (icvt@1 @0))
2359 (with
2360 {
2361 tree inside_type = TREE_TYPE (@0);
2362 tree inter_type = TREE_TYPE (@1);
2363 int inside_int = INTEGRAL_TYPE_P (inside_type);
2364 int inside_ptr = POINTER_TYPE_P (inside_type);
2365 int inside_float = FLOAT_TYPE_P (inside_type);
2366 int inside_vec = VECTOR_TYPE_P (inside_type);
2367 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2368 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2369 int inter_int = INTEGRAL_TYPE_P (inter_type);
2370 int inter_ptr = POINTER_TYPE_P (inter_type);
2371 int inter_float = FLOAT_TYPE_P (inter_type);
2372 int inter_vec = VECTOR_TYPE_P (inter_type);
2373 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2374 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2375 int final_int = INTEGRAL_TYPE_P (type);
2376 int final_ptr = POINTER_TYPE_P (type);
2377 int final_float = FLOAT_TYPE_P (type);
2378 int final_vec = VECTOR_TYPE_P (type);
2379 unsigned int final_prec = TYPE_PRECISION (type);
2380 int final_unsignedp = TYPE_UNSIGNED (type);
2381 }
2382 (switch
2383 /* In addition to the cases of two conversions in a row
2384 handled below, if we are converting something to its own
2385 type via an object of identical or wider precision, neither
2386 conversion is needed. */
2387 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2388 || (GENERIC
2389 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2390 && (((inter_int || inter_ptr) && final_int)
2391 || (inter_float && final_float))
2392 && inter_prec >= final_prec)
2393 (ocvt @0))
2394
2395 /* Likewise, if the intermediate and initial types are either both
2396 float or both integer, we don't need the middle conversion if the
2397 former is wider than the latter and doesn't change the signedness
2398 (for integers). Avoid this if the final type is a pointer since
2399 then we sometimes need the middle conversion. */
2400 (if (((inter_int && inside_int) || (inter_float && inside_float))
2401 && (final_int || final_float)
2402 && inter_prec >= inside_prec
2403 && (inter_float || inter_unsignedp == inside_unsignedp))
2404 (ocvt @0))
2405
2406 /* If we have a sign-extension of a zero-extended value, we can
2407 replace that by a single zero-extension. Likewise if the
2408 final conversion does not change precision we can drop the
2409 intermediate conversion. */
2410 (if (inside_int && inter_int && final_int
2411 && ((inside_prec < inter_prec && inter_prec < final_prec
2412 && inside_unsignedp && !inter_unsignedp)
2413 || final_prec == inter_prec))
2414 (ocvt @0))
2415
2416 /* Two conversions in a row are not needed unless:
2417 - some conversion is floating-point (overstrict for now), or
2418 - some conversion is a vector (overstrict for now), or
2419 - the intermediate type is narrower than both initial and
2420 final, or
2421 - the intermediate type and innermost type differ in signedness,
2422 and the outermost type is wider than the intermediate, or
2423 - the initial type is a pointer type and the precisions of the
2424 intermediate and final types differ, or
2425 - the final type is a pointer type and the precisions of the
2426 initial and intermediate types differ. */
2427 (if (! inside_float && ! inter_float && ! final_float
2428 && ! inside_vec && ! inter_vec && ! final_vec
2429 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2430 && ! (inside_int && inter_int
2431 && inter_unsignedp != inside_unsignedp
2432 && inter_prec < final_prec)
2433 && ((inter_unsignedp && inter_prec > inside_prec)
2434 == (final_unsignedp && final_prec > inter_prec))
2435 && ! (inside_ptr && inter_prec != final_prec)
2436 && ! (final_ptr && inside_prec != inter_prec))
2437 (ocvt @0))
2438
2439 /* A truncation to an unsigned type (a zero-extension) should be
2440 canonicalized as a bitwise AND with a mask. */
2441 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2442 && final_int && inter_int && inside_int
2443 && final_prec == inside_prec
2444 && final_prec > inter_prec
2445 && inter_unsignedp)
2446 (convert (bit_and @0 { wide_int_to_tree
2447 (inside_type,
2448 wi::mask (inter_prec, false,
2449 TYPE_PRECISION (inside_type))); })))
2450
2451 /* If we are converting an integer to a floating-point that can
2452 represent it exactly and back to an integer, we can skip the
2453 floating-point conversion. */
2454 (if (GIMPLE /* PR66211 */
2455 && inside_int && inter_float && final_int
2456 && (unsigned) significand_size (TYPE_MODE (inter_type))
2457 >= inside_prec - !inside_unsignedp)
2458 (convert @0)))))))
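
/* Editor's sketch of the last case: a 32-bit int round-trips exactly
   through double (53-bit significand), so on GIMPLE

     int f (int i) { return (int) (double) i; }

   folds to i, whereas a float intermediate (24-bit significand)
   would not qualify.  */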
2459
2460 /* If we have a narrowing conversion to an integral type that is fed by a
2461 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2462 masks off bits outside the final type (and nothing else). */
2463 (simplify
2464 (convert (bit_and @0 INTEGER_CST@1))
2465 (if (INTEGRAL_TYPE_P (type)
2466 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2467 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2468 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2469 TYPE_PRECISION (type)), 0))
2470 (convert @0)))
2471
2472
2473 /* (X /[ex] A) * A -> X. */
2474 (simplify
2475 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2476 (convert @0))
2477
2478 /* Canonicalization of binary operations. */
2479
2480 /* Convert X + -C into X - C. */
2481 (simplify
2482 (plus @0 REAL_CST@1)
2483 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2484 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
2485 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2486 (minus @0 { tem; })))))
2487
2488 /* Convert x+x into x*2. */
2489 (simplify
2490 (plus @0 @0)
2491 (if (SCALAR_FLOAT_TYPE_P (type))
2492 (mult @0 { build_real (type, dconst2); })
2493 (if (INTEGRAL_TYPE_P (type))
2494 (mult @0 { build_int_cst (type, 2); }))))
2495
2496 /* 0 - X -> -X. */
2497 (simplify
2498 (minus integer_zerop @1)
2499 (negate @1))
2500 (simplify
2501 (pointer_diff integer_zerop @1)
2502 (negate (convert @1)))
2503
2504 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2505 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2506 (-ARG1 + ARG0) reduces to -ARG1. */
2507 (simplify
2508 (minus real_zerop@0 @1)
2509 (if (fold_real_zero_addition_p (type, @0, 0))
2510 (negate @1)))
2511
2512 /* Transform x * -1 into -x. */
2513 (simplify
2514 (mult @0 integer_minus_onep)
2515 (negate @0))
2516
2517 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2518 signed overflow for CST != 0 && CST != -1. */
2519 (simplify
2520 (mult:c (mult:s @0 INTEGER_CST@1) @2)
2521 (if (TREE_CODE (@2) != INTEGER_CST
2522 && !integer_zerop (@1) && !integer_minus_onep (@1))
2523 (mult (mult @0 @2) @1)))
2524
2525 /* True if we can easily extract the real and imaginary parts of a complex
2526 number. */
2527 (match compositional_complex
2528 (convert? (complex @0 @1)))
2529
2530 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2531 (simplify
2532 (complex (realpart @0) (imagpart @0))
2533 @0)
2534 (simplify
2535 (realpart (complex @0 @1))
2536 @0)
2537 (simplify
2538 (imagpart (complex @0 @1))
2539 @1)
2540
2541 /* Sometimes we only care about half of a complex expression. */
2542 (simplify
2543 (realpart (convert?:s (conj:s @0)))
2544 (convert (realpart @0)))
2545 (simplify
2546 (imagpart (convert?:s (conj:s @0)))
2547 (convert (negate (imagpart @0))))
2548 (for part (realpart imagpart)
2549 (for op (plus minus)
2550 (simplify
2551 (part (convert?:s@2 (op:s @0 @1)))
2552 (convert (op (part @0) (part @1))))))
2553 (simplify
2554 (realpart (convert?:s (CEXPI:s @0)))
2555 (convert (COS @0)))
2556 (simplify
2557 (imagpart (convert?:s (CEXPI:s @0)))
2558 (convert (SIN @0)))
2559
2560 /* conj(conj(x)) -> x */
2561 (simplify
2562 (conj (convert? (conj @0)))
2563 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2564 (convert @0)))
2565
2566 /* conj({x,y}) -> {x,-y} */
2567 (simplify
2568 (conj (convert?:s (complex:s @0 @1)))
2569 (with { tree itype = TREE_TYPE (type); }
2570 (complex (convert:itype @0) (negate (convert:itype @1)))))
2571
2572 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2573 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2574 (simplify
2575 (bswap (bswap @0))
2576 @0)
2577 (simplify
2578 (bswap (bit_not (bswap @0)))
2579 (bit_not @0))
2580 (for bitop (bit_xor bit_ior bit_and)
2581 (simplify
2582 (bswap (bitop:c (bswap @0) @1))
2583 (bitop @0 (bswap @1)))))
2584
2585
2586 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
2587
2588 /* Simplify constant conditions.
2589 Only optimize constant conditions when the selected branch
2590 has the same type as the COND_EXPR. This avoids optimizing
2591 away "c ? x : throw", where the throw has a void type.
2592 Note that we cannot throw away the fold-const.c variant nor
2593 this one as we depend on doing this transform before
2594 A ? B : B -> B can trigger, and the fold-const.c one can optimize
2595 0 ? A : B to B even if A has side-effects, something
2596 genmatch cannot handle. */
2597 (simplify
2598 (cond INTEGER_CST@0 @1 @2)
2599 (if (integer_zerop (@0))
2600 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2601 @2)
2602 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2603 @1)))
2604 (simplify
2605 (vec_cond VECTOR_CST@0 @1 @2)
2606 (if (integer_all_onesp (@0))
2607 @1
2608 (if (integer_zerop (@0))
2609 @2)))
2610
2611 /* Simplification moved from fold_cond_expr_with_comparison. It may also
2612 be extended. */
2613 /* This pattern implements two kinds of simplification:
2614
2615 Case 1)
2616 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
2617 1) Conversions are type widening from a smaller type.
2618 2) Const c1 equals c2 after canonicalizing the comparison.
2619 3) Comparison has tree code LT, LE, GT or GE.
2620 This specific pattern is needed when (cmp (convert x) c) may not
2621 be simplified by comparison patterns because of multiple uses of
2622 x. It also makes sense here because simplifying across multiple
2623 references to a variable is always beneficial in complicated cases.
2624
2625 Case 2)
2626 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2627 (for cmp (lt le gt ge eq)
2628 (simplify
2629 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2630 (with
2631 {
2632 tree from_type = TREE_TYPE (@1);
2633 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2634 enum tree_code code = ERROR_MARK;
2635
2636 if (INTEGRAL_TYPE_P (from_type)
2637 && int_fits_type_p (@2, from_type)
2638 && (types_match (c1_type, from_type)
2639 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2640 && (TYPE_UNSIGNED (from_type)
2641 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2642 && (types_match (c2_type, from_type)
2643 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2644 && (TYPE_UNSIGNED (from_type)
2645 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2646 {
2647 if (cmp != EQ_EXPR)
2648 {
2649 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2650 {
2651 /* X <= Y - 1 equals to X < Y. */
2652 if (cmp == LE_EXPR)
2653 code = LT_EXPR;
2654 /* X > Y - 1 equals to X >= Y. */
2655 if (cmp == GT_EXPR)
2656 code = GE_EXPR;
2657 }
2658 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2659 {
2660 /* X < Y + 1 equals to X <= Y. */
2661 if (cmp == LT_EXPR)
2662 code = LE_EXPR;
2663 /* X >= Y + 1 equals to X > Y. */
2664 if (cmp == GE_EXPR)
2665 code = GT_EXPR;
2666 }
2667 if (code != ERROR_MARK
2668 || wi::to_widest (@2) == wi::to_widest (@3))
2669 {
2670 if (cmp == LT_EXPR || cmp == LE_EXPR)
2671 code = MIN_EXPR;
2672 if (cmp == GT_EXPR || cmp == GE_EXPR)
2673 code = MAX_EXPR;
2674 }
2675 }
2676 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
2677 else if (int_fits_type_p (@3, from_type))
2678 code = EQ_EXPR;
2679 }
2680 }
2681 (if (code == MAX_EXPR)
2682 (convert (max @1 (convert @2)))
2683 (if (code == MIN_EXPR)
2684 (convert (min @1 (convert @2)))
2685 (if (code == EQ_EXPR)
2686 (convert (cond (eq @1 (convert @3))
2687 (convert:from_type @3) (convert:from_type @2)))))))))
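
/* Editor's sketch of Case 1): with an unsigned char x promoted to int,

     int f (unsigned char x) { return x <= 9 ? x : 10; }

   has c1 == 9 == c2 - 1, so LE is adjusted to LT and the conditional
   simplifies to MIN_EXPR <x, 10> computed in the narrow type.  */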
2688
2689 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2690
2691 1) OP is PLUS or MINUS.
2692 2) CMP is LT, LE, GT or GE.
2693 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2694
2695 This pattern also handles special cases like:
2696
2697 A) Operand x is an unsigned to signed type conversion and c1 is
2698 integer zero. In this case,
2699 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2700 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2701 B) Const c1 may not equal (c3 op' c2). In this case we also
2702 check equality for (c1+1) and (c1-1) by adjusting comparison
2703 code.
2704
2705 TODO: Though signed types are handled by this pattern, they cannot be
2706 simplified at the moment because the C standard requires additional
2707 type promotion. In order to match & simplify them here, the IR needs
2708 to be cleaned up by other optimizers, e.g. VRP. */
2709 (for op (plus minus)
2710 (for cmp (lt le gt ge)
2711 (simplify
2712 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2713 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2714 (if (types_match (from_type, to_type)
2715 /* Check if it is special case A). */
2716 || (TYPE_UNSIGNED (from_type)
2717 && !TYPE_UNSIGNED (to_type)
2718 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2719 && integer_zerop (@1)
2720 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2721 (with
2722 {
2723 bool overflow = false;
2724 enum tree_code code, cmp_code = cmp;
2725 wide_int real_c1;
2726 wide_int c1 = wi::to_wide (@1);
2727 wide_int c2 = wi::to_wide (@2);
2728 wide_int c3 = wi::to_wide (@3);
2729 signop sgn = TYPE_SIGN (from_type);
2730
2731 /* Handle special case A), given x of unsigned type:
2732 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2733 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2734 if (!types_match (from_type, to_type))
2735 {
2736 if (cmp_code == LT_EXPR)
2737 cmp_code = GT_EXPR;
2738 if (cmp_code == GE_EXPR)
2739 cmp_code = LE_EXPR;
2740 c1 = wi::max_value (to_type);
2741 }
2742 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2743 compute (c3 op' c2) and check if it equals c1, with op' being
2744 the inverted operator of op. Make sure overflow doesn't happen
2745 if it is undefined. */
2746 if (op == PLUS_EXPR)
2747 real_c1 = wi::sub (c3, c2, sgn, &overflow);
2748 else
2749 real_c1 = wi::add (c3, c2, sgn, &overflow);
2750
2751 code = cmp_code;
2752 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
2753 {
2754 /* Check if c1 equals real_c1. Boundary condition is handled
2755 by adjusting comparison operation if necessary. */
2756 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
2757 && !overflow)
2758 {
2759 /* X <= Y - 1 equals to X < Y. */
2760 if (cmp_code == LE_EXPR)
2761 code = LT_EXPR;
2762 /* X > Y - 1 equals to X >= Y. */
2763 if (cmp_code == GT_EXPR)
2764 code = GE_EXPR;
2765 }
2766 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
2767 && !overflow)
2768 {
2769 /* X < Y + 1 equals to X <= Y. */
2770 if (cmp_code == LT_EXPR)
2771 code = LE_EXPR;
2772 /* X >= Y + 1 equals to X > Y. */
2773 if (cmp_code == GE_EXPR)
2774 code = GT_EXPR;
2775 }
2776 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
2777 {
2778 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
2779 code = MIN_EXPR;
2780 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
2781 code = MAX_EXPR;
2782 }
2783 }
2784 }
2785 (if (code == MAX_EXPR)
2786 (op (max @X { wide_int_to_tree (from_type, real_c1); })
2787 { wide_int_to_tree (from_type, c2); })
2788 (if (code == MIN_EXPR)
2789 (op (min @X { wide_int_to_tree (from_type, real_c1); })
2790 { wide_int_to_tree (from_type, c2); })))))))))
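
/* E.g. (editor's sketch) x < 10 ? x + 3 : 13 matches with c1 == 10,
   c2 == 3 and c3 == 13: real_c1 = c3 - c2 == c1, so the whole
   conditional becomes MIN_EXPR <x, 10> + 3.  */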
2791
2792 (for cnd (cond vec_cond)
2793 /* A ? B : (A ? X : C) -> A ? B : C. */
2794 (simplify
2795 (cnd @0 (cnd @0 @1 @2) @3)
2796 (cnd @0 @1 @3))
2797 (simplify
2798 (cnd @0 @1 (cnd @0 @2 @3))
2799 (cnd @0 @1 @3))
2800 /* A ? B : (!A ? C : X) -> A ? B : C. */
2801 /* ??? This matches embedded conditions open-coded because genmatch
2802 would generate matching code for conditions in separate stmts only.
2803 The following is still important for merging the then and else arm
2804 cases coming from if-conversion. */
2805 (simplify
2806 (cnd @0 @1 (cnd @2 @3 @4))
2807 (if (COMPARISON_CLASS_P (@0)
2808 && COMPARISON_CLASS_P (@2)
2809 && invert_tree_comparison
2810 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
2811 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
2812 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
2813 (cnd @0 @1 @3)))
2814 (simplify
2815 (cnd @0 (cnd @1 @2 @3) @4)
2816 (if (COMPARISON_CLASS_P (@0)
2817 && COMPARISON_CLASS_P (@1)
2818 && invert_tree_comparison
2819 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
2820 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
2821 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
2822 (cnd @0 @3 @4)))
2823
2824 /* A ? B : B -> B. */
2825 (simplify
2826 (cnd @0 @1 @1)
2827 @1)
2828
2829 /* !A ? B : C -> A ? C : B. */
2830 (simplify
2831 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
2832 (cnd @0 @2 @1)))
2833
2834 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
2835 return all -1 or all 0 results. */
2836 /* ??? We could instead convert all instances of the vec_cond to negate,
2837 but that isn't necessarily a win on its own. */
2838 (simplify
2839 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2840 (if (VECTOR_TYPE_P (type)
2841 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2842 && (TYPE_MODE (TREE_TYPE (type))
2843 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2844 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2845
2846 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
2847 (simplify
2848 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2849 (if (VECTOR_TYPE_P (type)
2850 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2851 && (TYPE_MODE (TREE_TYPE (type))
2852 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2853 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2854
2855
2856 /* Simplifications of comparisons. */
2857
2858 /* See if we can reduce the magnitude of a constant involved in a
2859 comparison by changing the comparison code. This is a canonicalization
2860 formerly done by maybe_canonicalize_comparison_1. */
2861 (for cmp (le gt)
2862 acmp (lt ge)
2863 (simplify
2864 (cmp @0 INTEGER_CST@1)
2865 (if (tree_int_cst_sgn (@1) == -1)
2866 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
2867 (for cmp (ge lt)
2868 acmp (gt le)
2869 (simplify
2870 (cmp @0 INTEGER_CST@1)
2871 (if (tree_int_cst_sgn (@1) == 1)
2872 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
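
/* E.g. x <= -1 canonicalizes to x < 0 and x >= 1 to x > 0, shrinking
   the magnitude of the constant involved.  */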
2873
2874
2875 /* We can simplify a logical negation of a comparison to the
2876 inverted comparison. As we cannot compute an expression
2877 operator using invert_tree_comparison we have to simulate
2878 that with expression code iteration. */
2879 (for cmp (tcc_comparison)
2880 icmp (inverted_tcc_comparison)
2881 ncmp (inverted_tcc_comparison_with_nans)
2882 /* Ideally we'd like to combine the following two patterns
2883 and handle some more cases by using
2884 (logical_inverted_value (cmp @0 @1))
2885 here but for that genmatch would need to "inline" that.
2886 For now implement what forward_propagate_comparison did. */
2887 (simplify
2888 (bit_not (cmp @0 @1))
2889 (if (VECTOR_TYPE_P (type)
2890 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
2891 /* Comparison inversion may be impossible for trapping math,
2892 invert_tree_comparison will tell us. But we can't use
2893 a computed operator in the replacement tree, thus we have
2894 to play the trick below. */
2895 (with { enum tree_code ic = invert_tree_comparison
2896 (cmp, HONOR_NANS (@0)); }
2897 (if (ic == icmp)
2898 (icmp @0 @1)
2899 (if (ic == ncmp)
2900 (ncmp @0 @1))))))
2901 (simplify
2902 (bit_xor (cmp @0 @1) integer_truep)
2903 (with { enum tree_code ic = invert_tree_comparison
2904 (cmp, HONOR_NANS (@0)); }
2905 (if (ic == icmp)
2906 (icmp @0 @1)
2907 (if (ic == ncmp)
2908 (ncmp @0 @1))))))
2909
2910 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
2911 ??? The transformation is valid for the other operators if overflow
2912 is undefined for the type, but performing it here badly interacts
2913 with the transformation in fold_cond_expr_with_comparison which
2914 attempts to synthesize ABS_EXPR. */
2915 (for cmp (eq ne)
2916 (for sub (minus pointer_diff)
2917 (simplify
2918 (cmp (sub@2 @0 @1) integer_zerop)
2919 (if (single_use (@2))
2920 (cmp @0 @1)))))
2921
2922 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
2923 signed arithmetic case. That form is created by the compiler
2924 often enough for folding it to be of value. One example is in
2925 computing loop trip counts after Operator Strength Reduction. */
2926 (for cmp (simple_comparison)
2927 scmp (swapped_simple_comparison)
2928 (simplify
2929 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2930 /* Handle unfolded multiplication by zero. */
2931 (if (integer_zerop (@1))
2932 (cmp @1 @2)
2933 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2934 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2935 && single_use (@3))
2936 /* If @1 is negative we swap the sense of the comparison. */
2937 (if (tree_int_cst_sgn (@1) < 0)
2938 (scmp @0 @2)
2939 (cmp @0 @2))))))
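
/* Editor's examples, for signed x where overflow is undefined:
   x * 4 > 0 simplifies to x > 0, and x * -4 > 0 to x < 0; wrapping
   (unsigned) multiplications are left alone.  */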
2940
2941 /* Simplify comparison of something with itself. For IEEE
2942 floating-point, we can only do some of these simplifications. */
2943 (for cmp (eq ge le)
2944 (simplify
2945 (cmp @0 @0)
2946 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
2947 || ! HONOR_NANS (@0))
2948 { constant_boolean_node (true, type); }
2949 (if (cmp != EQ_EXPR)
2950 (eq @0 @0)))))
2951 (for cmp (ne gt lt)
2952 (simplify
2953 (cmp @0 @0)
2954 (if (cmp != NE_EXPR
2955 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
2956 || ! HONOR_NANS (@0))
2957 { constant_boolean_node (false, type); })))
2958 (for cmp (unle unge uneq)
2959 (simplify
2960 (cmp @0 @0)
2961 { constant_boolean_node (true, type); }))
2962 (for cmp (unlt ungt)
2963 (simplify
2964 (cmp @0 @0)
2965 (unordered @0 @0)))
2966 (simplify
2967 (ltgt @0 @0)
2968 (if (!flag_trapping_math)
2969 { constant_boolean_node (false, type); }))
2970
2971 /* Fold ~X op ~Y as Y op X. */
2972 (for cmp (simple_comparison)
2973 (simplify
2974 (cmp (bit_not@2 @0) (bit_not@3 @1))
2975 (if (single_use (@2) && single_use (@3))
2976 (cmp @1 @0))))
2977
2978 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
2979 (for cmp (simple_comparison)
2980 scmp (swapped_simple_comparison)
2981 (simplify
2982 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
2983 (if (single_use (@2)
2984 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2985 (scmp @0 (bit_not @1)))))
2986
2987 (for cmp (simple_comparison)
2988 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
2989 (simplify
2990 (cmp (convert@2 @0) (convert? @1))
2991 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2992 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2993 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
2994 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2995 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
2996 (with
2997 {
2998 tree type1 = TREE_TYPE (@1);
2999 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3000 {
3001 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3002 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3003 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3004 type1 = float_type_node;
3005 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3006 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3007 type1 = double_type_node;
3008 }
3009 tree newtype
3010 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
3011 ? TREE_TYPE (@0) : type1);
3012 }
3013 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3014 (cmp (convert:newtype @0) (convert:newtype @1))))))
3015
3016 (simplify
3017 (cmp @0 REAL_CST@1)
3018 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
3019 (switch
3020 /* a CMP (-0) -> a CMP 0 */
3021 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3022 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3023 /* x != NaN is always true, other ops are always false. */
3024 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3025 && ! HONOR_SNANS (@1))
3026 { constant_boolean_node (cmp == NE_EXPR, type); })
3027 /* Fold comparisons against infinity. */
3028 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3029 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3030 (with
3031 {
3032 REAL_VALUE_TYPE max;
3033 enum tree_code code = cmp;
3034 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3035 if (neg)
3036 code = swap_tree_comparison (code);
3037 }
3038 (switch
3039 /* x > +Inf is always false, if we ignore sNaNs. */
3040 (if (code == GT_EXPR
3041 && ! HONOR_SNANS (@0))
3042 { constant_boolean_node (false, type); })
3043 (if (code == LE_EXPR)
3044 /* x <= +Inf is always true, if we don't care about NaNs. */
3045 (if (! HONOR_NANS (@0))
3046 { constant_boolean_node (true, type); }
3047 /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
3048 (eq @0 @0)))
3049 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
3050 (if (code == EQ_EXPR || code == GE_EXPR)
3051 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3052 (if (neg)
3053 (lt @0 { build_real (TREE_TYPE (@0), max); })
3054 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3055 /* x < +Inf is always equal to x <= DBL_MAX. */
3056 (if (code == LT_EXPR)
3057 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3058 (if (neg)
3059 (ge @0 { build_real (TREE_TYPE (@0), max); })
3060 (le @0 { build_real (TREE_TYPE (@0), max); }))))
3061 /* x != +Inf is always equal to !(x > DBL_MAX). */
3062 (if (code == NE_EXPR)
3063 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3064 (if (! HONOR_NANS (@0))
3065 (if (neg)
3066 (ge @0 { build_real (TREE_TYPE (@0), max); })
3067 (le @0 { build_real (TREE_TYPE (@0), max); }))
3068 (if (neg)
3069 (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
3070 { build_one_cst (type); })
3071 (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
3072 { build_one_cst (type); }))))))))))
3073
3074 /* If this is a comparison of a real constant with a PLUS_EXPR
3075 or a MINUS_EXPR of a real constant, we can convert it into a
3076 comparison with a revised real constant as long as no overflow
3077 occurs when unsafe_math_optimizations are enabled. */
3078 (if (flag_unsafe_math_optimizations)
3079 (for op (plus minus)
3080 (simplify
3081 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3082 (with
3083 {
3084 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3085 TREE_TYPE (@1), @2, @1);
3086 }
3087 (if (tem && !TREE_OVERFLOW (tem))
3088 (cmp @0 { tem; }))))))
3089
3090 /* Likewise, we can simplify a comparison of a real constant with
3091 a MINUS_EXPR whose first operand is also a real constant, i.e.
3092 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3093 floating-point types only if -fassociative-math is set. */
3094 (if (flag_associative_math)
3095 (simplify
3096 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
3097 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
3098 (if (tem && !TREE_OVERFLOW (tem))
3099 (cmp { tem; } @1)))))
3100
3101 /* Fold comparisons against built-in math functions. */
3102 (if (flag_unsafe_math_optimizations
3103 && ! flag_errno_math)
3104 (for sq (SQRT)
3105 (simplify
3106 (cmp (sq @0) REAL_CST@1)
3107 (switch
3108 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3109 (switch
3110 /* sqrt(x) < y is always false, if y is negative. */
3111 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
3112 { constant_boolean_node (false, type); })
3113 /* sqrt(x) > y is always true, if y is negative and we
3114 don't care about NaNs, i.e. negative values of x. */
3115 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3116 { constant_boolean_node (true, type); })
3117 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3118 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3119 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3120 (switch
3121 /* sqrt(x) < 0 is always false. */
3122 (if (cmp == LT_EXPR)
3123 { constant_boolean_node (false, type); })
3124 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3125 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3126 { constant_boolean_node (true, type); })
3127 /* sqrt(x) <= 0 -> x == 0. */
3128 (if (cmp == LE_EXPR)
3129 (eq @0 @1))
3130 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3131 == or !=. In the last case:
3132
3133 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3134
3135 if x is negative or NaN. Due to -funsafe-math-optimizations,
3136 the results for other x follow from natural arithmetic. */
3137 (cmp @0 @1)))
3138 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3139 (with
3140 {
3141 REAL_VALUE_TYPE c2;
3142 real_arithmetic (&c2, MULT_EXPR,
3143 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3144 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3145 }
3146 (if (REAL_VALUE_ISINF (c2))
3147 /* sqrt(x) > y is x == +Inf, when y is very large. */
3148 (if (HONOR_INFINITIES (@0))
3149 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3150 { constant_boolean_node (false, type); })
3151 /* sqrt(x) > c is the same as x > c*c. */
3152 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3153 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3154 (with
3155 {
3156 REAL_VALUE_TYPE c2;
3157 real_arithmetic (&c2, MULT_EXPR,
3158 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3159 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3160 }
3161 (if (REAL_VALUE_ISINF (c2))
3162 (switch
3163 /* sqrt(x) < y is always true, when y is a very large
3164 value and we don't care about NaNs or Infinities. */
3165 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3166 { constant_boolean_node (true, type); })
3167 /* sqrt(x) < y is x != +Inf when y is very large and we
3168 don't care about NaNs. */
3169 (if (! HONOR_NANS (@0))
3170 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3171 /* sqrt(x) < y is x >= 0 when y is very large and we
3172 don't care about Infinities. */
3173 (if (! HONOR_INFINITIES (@0))
3174 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3175 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3176 (if (GENERIC)
3177 (truth_andif
3178 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3179 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3180 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3181 (if (! HONOR_NANS (@0))
3182 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3183 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3184 (if (GENERIC)
3185 (truth_andif
3186 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3187 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3188 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3189 (simplify
3190 (cmp (sq @0) (sq @1))
3191 (if (! HONOR_NANS (@0))
3192 (cmp @0 @1))))))
3193
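/* An editorial illustration of the sqrt rules above, not an additional
   pattern: with -funsafe-math-optimizations and -fno-math-errno a test
   such as

     #include <math.h>
     int below (double x) { return sqrt (x) < 3.0; }

   can be folded to the equivalent of "x >= 0.0 && x < 9.0" (or plain
   "x < 9.0" when NaNs need not be honored), with 9.0 computed by the
   real_arithmetic multiplication shown above.  */
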
3194 /* Optimize various special cases of (FTYPE) N CMP CST. */
3195 (for cmp (lt le eq ne ge gt)
3196 icmp (le le eq ne ge ge)
3197 (simplify
3198 (cmp (float @0) REAL_CST@1)
3199 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3200 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3201 (with
3202 {
3203 tree itype = TREE_TYPE (@0);
3204 signop isign = TYPE_SIGN (itype);
3205 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3206 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3207 /* Be careful to preserve any potential exceptions due to
3208 NaNs. qNaNs are ok in == or != context.
3209 TODO: relax under -fno-trapping-math or
3210 -fno-signaling-nans. */
3211 bool exception_p
3212 = real_isnan (cst) && (cst->signalling
3213 || (cmp != EQ_EXPR && cmp != NE_EXPR));
3214 /* INT?_MIN is a power of two, so it takes
3215 only one mantissa bit. */
3216 bool signed_p = isign == SIGNED;
3217 bool itype_fits_ftype_p
3218 = TYPE_PRECISION (itype) - signed_p <= significand_size (fmt);
3219 }
3220 /* TODO: allow non-fitting itype and SNaNs when
3221 -fno-trapping-math. */
3222 (if (itype_fits_ftype_p && ! exception_p)
3223 (with
3224 {
3225 REAL_VALUE_TYPE imin, imax;
3226 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3227 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3228
3229 REAL_VALUE_TYPE icst;
3230 if (cmp == GT_EXPR || cmp == GE_EXPR)
3231 real_ceil (&icst, fmt, cst);
3232 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3233 real_floor (&icst, fmt, cst);
3234 else
3235 real_trunc (&icst, fmt, cst);
3236
3237 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
3238
3239 bool overflow_p = false;
3240 wide_int icst_val
3241 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3242 }
3243 (switch
3244 /* Optimize cases when CST is outside of ITYPE's range. */
3245 (if (real_compare (LT_EXPR, cst, &imin))
3246 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3247 type); })
3248 (if (real_compare (GT_EXPR, cst, &imax))
3249 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3250 type); })
3251 /* Remove cast if CST is an integer representable by ITYPE. */
3252 (if (cst_int_p)
3253 (cmp @0 { gcc_assert (!overflow_p);
3254 wide_int_to_tree (itype, icst_val); })
3255 )
3256 /* When CST is fractional, optimize
3257 (FTYPE) N == CST -> 0
3258 (FTYPE) N != CST -> 1. */
3259 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3260 { constant_boolean_node (cmp == NE_EXPR, type); })
3261 /* Otherwise replace with a sensible integer constant. */
3262 (with
3263 {
3264 gcc_checking_assert (!overflow_p);
3265 }
3266 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
3267
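/* A hedged example of the (FTYPE) N CMP CST folds, assuming 32-bit int
   and IEEE double (so itype_fits_ftype_p holds):

     int f1 (int n) { return (double) n == 0.5; }
     int f2 (int n) { return (double) n < 2147483648.0; }

   f1 compares against a fractional constant and folds to 0; in f2 the
   constant is above imax (INT_MAX), so the comparison folds to 1.  */
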
3268 /* Fold A /[ex] B CMP C to A CMP B * C. */
3269 (for cmp (eq ne)
3270 (simplify
3271 (cmp (exact_div @0 @1) INTEGER_CST@2)
3272 (if (!integer_zerop (@1))
3273 (if (wi::to_wide (@2) == 0)
3274 (cmp @0 @2)
3275 (if (TREE_CODE (@1) == INTEGER_CST)
3276 (with
3277 {
3278 bool ovf;
3279 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3280 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3281 }
3282 (if (ovf)
3283 { constant_boolean_node (cmp == NE_EXPR, type); }
3284 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3285 (for cmp (lt le gt ge)
3286 (simplify
3287 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
3288 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3289 (with
3290 {
3291 bool ovf;
3292 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3293 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3294 }
3295 (if (ovf)
3296 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3297 TYPE_SIGN (TREE_TYPE (@2)))
3298 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3299 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
3300
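/* A sketch of where the A /[ex] B CMP C rules fire: pointer subtraction
   is lowered to an EXACT_DIV_EXPR by the element size, so

     int f (int *p, int *q) { return p - q == 5; }

   compares a byte difference divided exactly by 4 against 5; the eq/ne
   rule turns that into a compare of the byte difference against 20,
   with the wi::mul overflow check above guarding the multiplication.  */
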
3301 /* An unordered comparison tests whether either argument is a NaN. */
3302 (simplify
3303 (bit_ior (unordered @0 @0) (unordered @1 @1))
3304 (if (types_match (@0, @1))
3305 (unordered @0 @1)))
3306 (simplify
3307 (bit_and (ordered @0 @0) (ordered @1 @1))
3308 (if (types_match (@0, @1))
3309 (ordered @0 @1)))
3310 (simplify
3311 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3312 @2)
3313 (simplify
3314 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3315 @2)
3316
3317 /* Simple range test simplifications. */
3318 /* A < B || A >= B -> true. */
3319 (for test1 (lt le le le ne ge)
3320 test2 (ge gt ge ne eq ne)
3321 (simplify
3322 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3323 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3324 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3325 { constant_boolean_node (true, type); })))
3326 /* A < B && A >= B -> false. */
3327 (for test1 (lt lt lt le ne eq)
3328 test2 (ge gt eq gt eq gt)
3329 (simplify
3330 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3331 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3332 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3333 { constant_boolean_node (false, type); })))
3334
3335 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3336 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3337
3338 Note that comparisons
3339 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3340 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3341 will be canonicalized to the forms above, so there's no need to
3342 consider them here.
3343 */
3344
3345 (for cmp (le gt)
3346 eqcmp (eq ne)
3347 (simplify
3348 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3349 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3350 (with
3351 {
3352 tree ty = TREE_TYPE (@0);
3353 unsigned prec = TYPE_PRECISION (ty);
3354 wide_int mask = wi::to_wide (@2, prec);
3355 wide_int rhs = wi::to_wide (@3, prec);
3356 signop sgn = TYPE_SIGN (ty);
3357 }
3358 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3359 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3360 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3361 { build_zero_cst (ty); }))))))
3362
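/* A worked instance of the mask comparison rewrite, with N = 4, K = 2:

     int f (unsigned x) { return (x & 15) <= 3; }

   Here mask = 15 and rhs = 3 both have the 2**M - 1 shape and
   mask >= rhs, so the test becomes "(x & 12) == 0" (12 == mask - rhs),
   a comparison against zero instead of against 3.  */
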
3363 /* -A CMP -B -> B CMP A. */
3364 (for cmp (tcc_comparison)
3365 scmp (swapped_tcc_comparison)
3366 (simplify
3367 (cmp (negate @0) (negate @1))
3368 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3369 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3370 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3371 (scmp @0 @1)))
3372 (simplify
3373 (cmp (negate @0) CONSTANT_CLASS_P@1)
3374 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3375 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3376 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3377 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
3378 (if (tem && !TREE_OVERFLOW (tem))
3379 (scmp @0 { tem; }))))))
3380
3381 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3382 (for op (eq ne)
3383 (simplify
3384 (op (abs @0) zerop@1)
3385 (op @0 @1)))
3386
3387 /* From fold_sign_changed_comparison and fold_widened_comparison.
3388 FIXME: the lack of symmetry is disturbing. */
3389 (for cmp (simple_comparison)
3390 (simplify
3391 (cmp (convert@0 @00) (convert?@1 @10))
3392 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3393 /* Disable this optimization if we're casting a function pointer
3394 type on targets that require function pointer canonicalization. */
3395 && !(targetm.have_canonicalize_funcptr_for_compare ()
3396 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
3397 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
3398 && single_use (@0))
3399 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3400 && (TREE_CODE (@10) == INTEGER_CST
3401 || @1 != @10)
3402 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3403 || cmp == NE_EXPR
3404 || cmp == EQ_EXPR)
3405 && !POINTER_TYPE_P (TREE_TYPE (@00)))
3406 /* ??? The special-casing of INTEGER_CST conversion was in the original
3407 code and is kept here to avoid a spurious overflow flag on the resulting
3408 constant, which fold_convert would otherwise produce. */
3409 (if (TREE_CODE (@1) == INTEGER_CST)
3410 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3411 TREE_OVERFLOW (@1)); })
3412 (cmp @00 (convert @1)))
3413
3414 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3415 /* If possible, express the comparison in the shorter mode. */
3416 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
3417 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3418 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3419 && TYPE_UNSIGNED (TREE_TYPE (@00))))
3420 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3421 || ((TYPE_PRECISION (TREE_TYPE (@00))
3422 >= TYPE_PRECISION (TREE_TYPE (@10)))
3423 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3424 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3425 || (TREE_CODE (@10) == INTEGER_CST
3426 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3427 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3428 (cmp @00 (convert @10))
3429 (if (TREE_CODE (@10) == INTEGER_CST
3430 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3431 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3432 (with
3433 {
3434 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3435 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3436 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3437 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3438 }
3439 (if (above || below)
3440 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3441 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3442 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3443 { constant_boolean_node (above ? true : false, type); }
3444 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3445 { constant_boolean_node (above ? false : true, type); }))))))))))))
3446
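/* An illustrative case of the widened-comparison folding:

     int f (unsigned char c) { return (int) c < 300; }

   The constant 300 does not fit in unsigned char, the computed bounds
   make "above" true, and the LT comparison folds to 1 outright.  */
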
3447 (for cmp (eq ne)
3448 /* A local variable can never be pointed to by
3449 the default SSA name of an incoming parameter.
3450 SSA names are canonicalized to the second operand position. */
3451 (simplify
3452 (cmp addr@0 SSA_NAME@1)
3453 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3454 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3455 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3456 (if (TREE_CODE (base) == VAR_DECL
3457 && auto_var_in_fn_p (base, current_function_decl))
3458 (if (cmp == NE_EXPR)
3459 { constant_boolean_node (true, type); }
3460 { constant_boolean_node (false, type); }))))))
3461
3462 /* Equality compare simplifications from fold_binary */
3463 (for cmp (eq ne)
3464
3465 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3466 Similarly for NE_EXPR. */
3467 (simplify
3468 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3469 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
3470 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
3471 { constant_boolean_node (cmp == NE_EXPR, type); }))
3472
3473 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3474 (simplify
3475 (cmp (bit_xor @0 @1) integer_zerop)
3476 (cmp @0 @1))
3477
3478 /* (X ^ Y) == Y becomes X == 0.
3479 Likewise (X ^ Y) == X becomes Y == 0. */
3480 (simplify
3481 (cmp:c (bit_xor:c @0 @1) @0)
3482 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3483
3484 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3485 (simplify
3486 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3487 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
3488 (cmp @0 (bit_xor @1 (convert @2)))))
3489
3490 (simplify
3491 (cmp (convert? addr@0) integer_zerop)
3492 (if (tree_single_nonzero_warnv_p (@0, NULL))
3493 { constant_boolean_node (cmp == NE_EXPR, type); })))
3494
3495 /* If we have (A & C) == C where C is a power of 2, convert this into
3496 (A & C) != 0. Similarly for NE_EXPR. */
3497 (for cmp (eq ne)
3498 icmp (ne eq)
3499 (simplify
3500 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3501 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
3502
3503 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3504 convert this into a shift followed by ANDing with D. */
3505 (simplify
3506 (cond
3507 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
3508 integer_pow2p@2 integer_zerop)
3509 (with {
3510 int shift = (wi::exact_log2 (wi::to_wide (@2))
3511 - wi::exact_log2 (wi::to_wide (@1)));
3512 }
3513 (if (shift > 0)
3514 (bit_and
3515 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3516 (bit_and
3517 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); })) @2))))
3518
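/* A sketch for the cond-to-shift rewrite, with C = 4 and D = 16:

     int f (int x) { return (x & 4) ? 16 : 0; }

   shift = log2 (16) - log2 (4) = 2 is positive, so the conditional
   becomes "(x << 2) & 16"; a negative shift uses the rshift form.  */
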
3519 /* If we have (A & C) != 0 where C is the sign bit of A, convert
3520 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3521 (for cmp (eq ne)
3522 ncmp (ge lt)
3523 (simplify
3524 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3525 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3526 && type_has_mode_precision_p (TREE_TYPE (@0))
3527 && element_precision (@2) >= element_precision (@0)
3528 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
3529 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3530 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3531
3532 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
3533 this into a right shift or sign extension followed by ANDing with C. */
3534 (simplify
3535 (cond
3536 (lt @0 integer_zerop)
3537 integer_pow2p@1 integer_zerop)
3538 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
3539 (with {
3540 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
3541 }
3542 (if (shift >= 0)
3543 (bit_and
3544 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3545 @1)
3546 /* Otherwise the conversion type must be wider than TREE_TYPE (@0) and pure
3547 sign extension followed by AND with C will achieve the effect. */
3548 (bit_and (convert @0) @1)))))
3549
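/* A sketch for the sign test, assuming 32-bit int and C = 8:

     int f (int x) { return x < 0 ? 8 : 0; }

   shift = 32 - log2 (8) - 1 = 28, so this becomes "(x >> 28) & 8":
   the arithmetic shift copies the sign bit into bit 3 and the AND
   keeps just that bit.  */
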
3550 /* When the addresses are not directly of decls, compare base and offset.
3551 This implements some remaining parts of fold_comparison's address
3552 comparisons, but is not yet a complete replacement. Still, it is good
3553 enough to keep fold_stmt from regressing when not dispatching to fold_binary. */
3554 (for cmp (simple_comparison)
3555 (simplify
3556 (cmp (convert1?@2 addr@0) (convert2? addr@1))
3557 (with
3558 {
3559 poly_int64 off0, off1;
3560 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3561 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3562 if (base0 && TREE_CODE (base0) == MEM_REF)
3563 {
3564 off0 += mem_ref_offset (base0).force_shwi ();
3565 base0 = TREE_OPERAND (base0, 0);
3566 }
3567 if (base1 && TREE_CODE (base1) == MEM_REF)
3568 {
3569 off1 += mem_ref_offset (base1).force_shwi ();
3570 base1 = TREE_OPERAND (base1, 0);
3571 }
3572 }
3573 (if (base0 && base1)
3574 (with
3575 {
3576 int equal = 2;
3577 /* Punt in GENERIC on variables with value expressions;
3578 the value expressions might point to fields/elements
3579 of other vars etc. */
3580 if (GENERIC
3581 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3582 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3583 ;
3584 else if (decl_in_symtab_p (base0)
3585 && decl_in_symtab_p (base1))
3586 equal = symtab_node::get_create (base0)
3587 ->equal_address_to (symtab_node::get_create (base1));
3588 else if ((DECL_P (base0)
3589 || TREE_CODE (base0) == SSA_NAME
3590 || TREE_CODE (base0) == STRING_CST)
3591 && (DECL_P (base1)
3592 || TREE_CODE (base1) == SSA_NAME
3593 || TREE_CODE (base1) == STRING_CST))
3594 equal = (base0 == base1);
3595 }
3596 (if (equal == 1)
3597 (switch
3598 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3599 { constant_boolean_node (known_eq (off0, off1), type); })
3600 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3601 { constant_boolean_node (known_ne (off0, off1), type); })
3602 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
3603 { constant_boolean_node (known_lt (off0, off1), type); })
3604 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
3605 { constant_boolean_node (known_le (off0, off1), type); })
3606 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
3607 { constant_boolean_node (known_ge (off0, off1), type); })
3608 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
3609 { constant_boolean_node (known_gt (off0, off1), type); }))
3610 (if (equal == 0
3611 && DECL_P (base0) && DECL_P (base1)
3612 /* If we compare these as integers, require equal offsets. */
3613 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
3614 || known_eq (off0, off1)))
3615 (switch
3616 (if (cmp == EQ_EXPR)
3617 { constant_boolean_node (false, type); })
3618 (if (cmp == NE_EXPR)
3619 { constant_boolean_node (true, type); })))))))))
3620
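/* For example (a sketch of the base/offset logic above):

     static int a[4];
     int f (void) { return &a[1] != &a[2]; }

   Both addresses have base "a" with unit offsets 4 and 8, so equal
   is 1 and the NE comparison folds to 1 via known_ne (off0, off1).  */
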
3621 /* Simplify pointer equality compares using PTA. */
3622 (for neeq (ne eq)
3623 (simplify
3624 (neeq @0 @1)
3625 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3626 && ptrs_compare_unequal (@0, @1))
3627 { neeq == EQ_EXPR ? boolean_false_node : boolean_true_node; })))
3628
3629 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
3630 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3631 Disable the transform if either operand is a pointer to a function:
3632 applying it there broke pr22051-2.c for arm, where function pointer
3633 canonicalization is not wanted. */
3634
3635 (for cmp (ne eq)
3636 (simplify
3637 (cmp (convert @0) INTEGER_CST@1)
3638 (if ((POINTER_TYPE_P (TREE_TYPE (@0)) && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3639 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3640 || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && POINTER_TYPE_P (TREE_TYPE (@1))
3641 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
3642 (cmp @0 (convert @1)))))
3643
3644 /* Non-equality compare simplifications from fold_binary */
3645 (for cmp (lt gt le ge)
3646 /* Comparisons with the highest or lowest possible integer of
3647 the specified precision will have known values. */
3648 (simplify
3649 (cmp (convert?@2 @0) INTEGER_CST@1)
3650 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3651 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3652 (with
3653 {
3654 tree arg1_type = TREE_TYPE (@1);
3655 unsigned int prec = TYPE_PRECISION (arg1_type);
3656 wide_int max = wi::max_value (arg1_type);
3657 wide_int signed_max = wi::max_value (prec, SIGNED);
3658 wide_int min = wi::min_value (arg1_type);
3659 }
3660 (switch
3661 (if (wi::to_wide (@1) == max)
3662 (switch
3663 (if (cmp == GT_EXPR)
3664 { constant_boolean_node (false, type); })
3665 (if (cmp == GE_EXPR)
3666 (eq @2 @1))
3667 (if (cmp == LE_EXPR)
3668 { constant_boolean_node (true, type); })
3669 (if (cmp == LT_EXPR)
3670 (ne @2 @1))))
3671 (if (wi::to_wide (@1) == min)
3672 (switch
3673 (if (cmp == LT_EXPR)
3674 { constant_boolean_node (false, type); })
3675 (if (cmp == LE_EXPR)
3676 (eq @2 @1))
3677 (if (cmp == GE_EXPR)
3678 { constant_boolean_node (true, type); })
3679 (if (cmp == GT_EXPR)
3680 (ne @2 @1))))
3681 (if (wi::to_wide (@1) == max - 1)
3682 (switch
3683 (if (cmp == GT_EXPR)
3684 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))
3685 (if (cmp == LE_EXPR)
3686 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
3687 (if (wi::to_wide (@1) == min + 1)
3688 (switch
3689 (if (cmp == GE_EXPR)
3690 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))
3691 (if (cmp == LT_EXPR)
3692 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
3693 (if (wi::to_wide (@1) == signed_max
3694 && TYPE_UNSIGNED (arg1_type)
3695 /* We will flip the signedness of the comparison operator
3696 associated with the mode of @1, so the sign bit is
3697 specified by this mode. Check that @1 is the signed
3698 max associated with this sign bit. */
3699 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
3700 /* signed_type does not work on pointer types. */
3701 && INTEGRAL_TYPE_P (arg1_type))
3702 /* The following case also applies to X < signed_max+1
3703 and X >= signed_max+1 because of previous transformations. */
3704 (if (cmp == LE_EXPR || cmp == GT_EXPR)
3705 (with { tree st = signed_type_for (arg1_type); }
3706 (if (cmp == LE_EXPR)
3707 (ge (convert:st @0) { build_zero_cst (st); })
3708 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
3709
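/* Examples for the constant-boundary rows above, with 32-bit types:

     int f1 (int x) { return x > 2147483646; }
     int f2 (unsigned x) { return x > 2147483647u; }

   f1 compares against max - 1 and becomes "x == 2147483647"; f2 hits
   the signed_max row and becomes "(int) x < 0", trading the large
   constant for a sign test.  */
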
3710 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
3711 /* If the second operand is NaN, the result is constant. */
3712 (simplify
3713 (cmp @0 REAL_CST@1)
3714 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3715 && (cmp != LTGT_EXPR || ! flag_trapping_math))
3716 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
3717 ? false : true, type); })))
3718
3719 /* bool_var != 0 becomes bool_var. */
3720 (simplify
3721 (ne @0 integer_zerop)
3722 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3723 && types_match (type, TREE_TYPE (@0)))
3724 (non_lvalue @0)))
3725 /* bool_var == 1 becomes bool_var. */
3726 (simplify
3727 (eq @0 integer_onep)
3728 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3729 && types_match (type, TREE_TYPE (@0)))
3730 (non_lvalue @0)))
3731 /* Do not handle
3732 bool_var == 0 becomes !bool_var or
3733 bool_var != 1 becomes !bool_var
3734 here because that is only good in an assignment context: as long
3735 as we require a tcc_comparison in GIMPLE_CONDs, we'd
3736 replace if (x == 0) with tem = ~x; if (tem != 0), which is
3737 clearly less optimal and which forwprop would transform back again. */
3738
3739 /* When one argument is a constant, overflow detection can be simplified.
3740 Currently restricted to a single use so as not to interfere too much with
3741 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
3742 A + CST CMP A -> A CMP' CST' */
3743 (for cmp (lt le ge gt)
3744 out (gt gt le le)
3745 (simplify
3746 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
3747 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3748 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
3749 && wi::to_wide (@1) != 0
3750 && single_use (@2))
3751 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
3752 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
3753 wi::max_value (prec, UNSIGNED)
3754 - wi::to_wide (@1)); })))))
3755
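/* The rewrite in source terms, for 32-bit unsigned arithmetic:

     int adds_overflow (unsigned a) { return a + 10 < a; }

   With wrapping arithmetic "a + 10 < a" is exactly the overflow test,
   and the rule turns it into "a > 4294967285" (UINT_MAX - 10), dropping
   the addition; the single_use restriction keeps IFN_ADD_OVERFLOW
   detection in tree-ssa-math-opts.c working.  */
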
3756 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
3757 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
3758 expects the long form, so we restrict the transformation for now. */
3759 (for cmp (gt le)
3760 (simplify
3761 (cmp:c (minus@2 @0 @1) @0)
3762 (if (single_use (@2)
3763 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3764 && TYPE_UNSIGNED (TREE_TYPE (@0))
3765 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3766 (cmp @1 @0))))
3767
3768 /* Testing for overflow is unnecessary if we already know the result. */
3769 /* A - B > A */
3770 (for cmp (gt le)
3771 out (ne eq)
3772 (simplify
3773 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3774 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3775 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3776 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3777 /* A + B < A */
3778 (for cmp (lt ge)
3779 out (ne eq)
3780 (simplify
3781 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3782 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3783 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3784 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3785
3786 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
3787 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
3788 (for cmp (lt ge)
3789 out (ne eq)
3790 (simplify
3791 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
3792 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
3793 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
3794 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
3795
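/* A sketch of the -1 / B < A rewrite, on 32-bit unsigned operands:

     int mul_wraps (unsigned a, unsigned b) { return -1u / b < a; }

   is replaced by a test of the overflow flag (the imagpart) of
   IFN_MUL_OVERFLOW (a, b), the internal function behind
   __builtin_mul_overflow; the division disappears entirely.  */
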
3796 /* Simplification of math builtins. These rules must all be optimizations
3797 as well as IL simplifications. If there is a possibility that the new
3798 form could be a pessimization, the rule should go in the canonicalization
3799 section that follows this one.
3800
3801 Rules can generally go in this section if they satisfy one of
3802 the following:
3803
3804 - the rule describes an identity
3805
3806 - the rule replaces calls with something as simple as addition or
3807 multiplication
3808
3809 - the rule contains unary calls only and simplifies the surrounding
3810 arithmetic. (The idea here is to exclude non-unary calls in which
3811 one operand is constant and in which the call is known to be cheap
3812 when the operand has that value.) */
3813
3814 (if (flag_unsafe_math_optimizations)
3815 /* Simplify sqrt(x) * sqrt(x) -> x. */
3816 (simplify
3817 (mult (SQRT@1 @0) @1)
3818 (if (!HONOR_SNANS (type))
3819 @0))
3820
3821 (for op (plus minus)
3822 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
3823 (simplify
3824 (op (rdiv @0 @1)
3825 (rdiv @2 @1))
3826 (rdiv (op @0 @2) @1)))
3827
3828 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
3829 (for root (SQRT CBRT)
3830 (simplify
3831 (mult (root:s @0) (root:s @1))
3832 (root (mult @0 @1))))
3833
3834 /* Simplify expN(x) * expN(y) -> expN(x+y). */
3835 (for exps (EXP EXP2 EXP10 POW10)
3836 (simplify
3837 (mult (exps:s @0) (exps:s @1))
3838 (exps (plus @0 @1))))
3839
3840 /* Simplify a/root(b/c) into a*root(c/b). */
3841 (for root (SQRT CBRT)
3842 (simplify
3843 (rdiv @0 (root:s (rdiv:s @1 @2)))
3844 (mult @0 (root (rdiv @2 @1)))))
3845
3846 /* Simplify x/expN(y) into x*expN(-y). */
3847 (for exps (EXP EXP2 EXP10 POW10)
3848 (simplify
3849 (rdiv @0 (exps:s @1))
3850 (mult @0 (exps (negate @1)))))
3851
3852 (for logs (LOG LOG2 LOG10 LOG10)
3853 exps (EXP EXP2 EXP10 POW10)
3854 /* logN(expN(x)) -> x. */
3855 (simplify
3856 (logs (exps @0))
3857 @0)
3858 /* expN(logN(x)) -> x. */
3859 (simplify
3860 (exps (logs @0))
3861 @0))
3862
3863 /* Optimize logN(func()) for various exponential functions. We
3864 want to determine the value "x" and the power "exponent" in
3865 order to transform logN(x**exponent) into exponent*logN(x). */
3866 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
3867 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
3868 (simplify
3869 (logs (exps @0))
3870 (if (SCALAR_FLOAT_TYPE_P (type))
3871 (with {
3872 tree x;
3873 switch (exps)
3874 {
3875 CASE_CFN_EXP:
3876 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
3877 x = build_real_truncate (type, dconst_e ());
3878 break;
3879 CASE_CFN_EXP2:
3880 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
3881 x = build_real (type, dconst2);
3882 break;
3883 CASE_CFN_EXP10:
3884 CASE_CFN_POW10:
3885 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
3886 {
3887 REAL_VALUE_TYPE dconst10;
3888 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
3889 x = build_real (type, dconst10);
3890 }
3891 break;
3892 default:
3893 gcc_unreachable ();
3894 }
3895 }
3896 (mult (logs { x; }) @0)))))
3897
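/* An editorial illustration: with -funsafe-math-optimizations,

     #include <math.h>
     double f (double x) { return log (exp2 (x)); }

   selects x = 2 in the switch above and becomes "x * log (2.0)",
   where log (2.0) is in turn folded to a constant by call folding.  */
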
3898 (for logs (LOG LOG
3899 LOG2 LOG2
3900 LOG10 LOG10)
3901 exps (SQRT CBRT)
3902 (simplify
3903 (logs (exps @0))
3904 (if (SCALAR_FLOAT_TYPE_P (type))
3905 (with {
3906 tree x;
3907 switch (exps)
3908 {
3909 CASE_CFN_SQRT:
3910 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
3911 x = build_real (type, dconsthalf);
3912 break;
3913 CASE_CFN_CBRT:
3914 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
3915 x = build_real_truncate (type, dconst_third ());
3916 break;
3917 default:
3918 gcc_unreachable ();
3919 }
3920 }
3921 (mult { x; } (logs @0))))))
3922
3923 /* logN(pow(x,exponent)) -> exponent*logN(x). */
3924 (for logs (LOG LOG2 LOG10)
3925 pows (POW)
3926 (simplify
3927 (logs (pows @0 @1))
3928 (mult @1 (logs @0))))
3929
3930 /* pow(C,x) -> exp(log(C)*x) if C > 0. */
3931 (for pows (POW)
3932 exps (EXP)
3933 logs (LOG)
3934 (simplify
3935 (pows REAL_CST@0 @1)
3936 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
3937 && real_isfinite (TREE_REAL_CST_PTR (@0)))
3938 (exps (mult (logs @0) @1)))))
3939
3940 (for sqrts (SQRT)
3941 cbrts (CBRT)
3942 pows (POW)
3943 exps (EXP EXP2 EXP10 POW10)
3944 /* sqrt(expN(x)) -> expN(x*0.5). */
3945 (simplify
3946 (sqrts (exps @0))
3947 (exps (mult @0 { build_real (type, dconsthalf); })))
3948 /* cbrt(expN(x)) -> expN(x/3). */
3949 (simplify
3950 (cbrts (exps @0))
3951 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
3952 /* pow(expN(x), y) -> expN(x*y). */
3953 (simplify
3954 (pows (exps @0) @1)
3955 (exps (mult @0 @1))))
3956
3957 /* tan(atan(x)) -> x. */
3958 (for tans (TAN)
3959 atans (ATAN)
3960 (simplify
3961 (tans (atans @0))
3962 @0)))
3963
3964 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
3965 (simplify
3966 (CABS (complex:C @0 real_zerop@1))
3967 (abs @0))
3968
3969 /* trunc(trunc(x)) -> trunc(x), etc. */
3970 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
3971 (simplify
3972 (fns (fns @0))
3973 (fns @0)))
3974 /* f(x) -> x if x is integer valued and f does nothing for such values. */
3975 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
3976 (simplify
3977 (fns integer_valued_real_p@0)
3978 @0))
3979
3980 /* hypot(x,0) and hypot(0,x) -> abs(x). */
3981 (simplify
3982 (HYPOT:c @0 real_zerop@1)
3983 (abs @0))
3984
3985 /* pow(1,x) -> 1. */
3986 (simplify
3987 (POW real_onep@0 @1)
3988 @0)
3989
3990 (simplify
3991 /* copysign(x,x) -> x. */
3992 (COPYSIGN @0 @0)
3993 @0)
3994
3995 (simplify
3996 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
3997 (COPYSIGN @0 tree_expr_nonnegative_p@1)
3998 (abs @0))
3999
4000 (for scale (LDEXP SCALBN SCALBLN)
4001 /* ldexp(0, x) -> 0. */
4002 (simplify
4003 (scale real_zerop@0 @1)
4004 @0)
4005 /* ldexp(x, 0) -> x. */
4006 (simplify
4007 (scale @0 integer_zerop@1)
4008 @0)
4009 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
4010 (simplify
4011 (scale REAL_CST@0 @1)
4012 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
4013 @0)))
4014
4015 /* Canonicalization of sequences of math builtins. These rules represent
4016 IL simplifications but are not necessarily optimizations.
4017
4018 The sincos pass is responsible for picking "optimal" implementations
4019 of math builtins, which may be more complicated and can sometimes go
4020 the other way, e.g. converting pow into a sequence of sqrts.
4021 We only want to do these canonicalizations before the pass has run. */
4022
4023 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4024 /* Simplify tan(x) * cos(x) -> sin(x). */
4025 (simplify
4026 (mult:c (TAN:s @0) (COS:s @0))
4027 (SIN @0))
4028
4029 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4030 (simplify
4031 (mult:c @0 (POW:s @0 REAL_CST@1))
4032 (if (!TREE_OVERFLOW (@1))
4033 (POW @0 (plus @1 { build_one_cst (type); }))))
4034
4035 /* Simplify sin(x) / cos(x) -> tan(x). */
4036 (simplify
4037 (rdiv (SIN:s @0) (COS:s @0))
4038 (TAN @0))
4039
4040 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4041 (simplify
4042 (rdiv (COS:s @0) (SIN:s @0))
4043 (rdiv { build_one_cst (type); } (TAN @0)))
4044
4045 /* Simplify sin(x) / tan(x) -> cos(x). */
4046 (simplify
4047 (rdiv (SIN:s @0) (TAN:s @0))
4048 (if (! HONOR_NANS (@0)
4049 && ! HONOR_INFINITIES (@0))
4050 (COS @0)))
4051
4052 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4053 (simplify
4054 (rdiv (TAN:s @0) (SIN:s @0))
4055 (if (! HONOR_NANS (@0)
4056 && ! HONOR_INFINITIES (@0))
4057 (rdiv { build_one_cst (type); } (COS @0))))
4058
4059 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4060 (simplify
4061 (mult (POW:s @0 @1) (POW:s @0 @2))
4062 (POW @0 (plus @1 @2)))
4063
4064 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4065 (simplify
4066 (mult (POW:s @0 @1) (POW:s @2 @1))
4067 (POW (mult @0 @2) @1))
4068
4069 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4070 (simplify
4071 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4072 (POWI (mult @0 @2) @1))
4073
4074 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4075 (simplify
4076 (rdiv (POW:s @0 REAL_CST@1) @0)
4077 (if (!TREE_OVERFLOW (@1))
4078 (POW @0 (minus @1 { build_one_cst (type); }))))
4079
4080 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4081 (simplify
4082 (rdiv @0 (POW:s @1 @2))
4083 (mult @0 (POW @1 (negate @2))))
4084
4085 (for sqrts (SQRT)
4086 cbrts (CBRT)
4087 pows (POW)
4088 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4089 (simplify
4090 (sqrts (sqrts @0))
4091 (pows @0 { build_real (type, dconst_quarter ()); }))
4092 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4093 (simplify
4094 (sqrts (cbrts @0))
4095 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4096 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4097 (simplify
4098 (cbrts (sqrts @0))
4099 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4100 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4101 (simplify
4102 (cbrts (cbrts tree_expr_nonnegative_p@0))
4103 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4104 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4105 (simplify
4106 (sqrts (pows @0 @1))
4107 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4108 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4109 (simplify
4110 (cbrts (pows tree_expr_nonnegative_p@0 @1))
4111 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4112 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4113 (simplify
4114 (pows (sqrts @0) @1)
4115 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4116 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4117 (simplify
4118 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4119 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4120 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4121 (simplify
4122 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4123 (pows @0 (mult @1 @2))))
4124
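/* A chained example of the pow canonicalizations (before sincos runs):

     #include <math.h>
     double f (double x) { return sqrt (sqrt (x)); }

   becomes pow (x, 0.25); similarly sqrt (pow (x, y)) would become
   pow (fabs (x), y * 0.5) by the rules above.  */
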
4125 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4126 (simplify
4127 (CABS (complex @0 @0))
4128 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4129
4130 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4131 (simplify
4132 (HYPOT @0 @0)
4133 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4134
4135 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4136 (for cexps (CEXP)
4137 exps (EXP)
4138 cexpis (CEXPI)
4139 (simplify
4140 (cexps compositional_complex@0)
4141 (if (targetm.libc_has_function (function_c99_math_complex))
4142 (complex
4143 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4144 (mult @1 (imagpart @2)))))))
4145
4146 (if (canonicalize_math_p ())
4147 /* floor(x) -> trunc(x) if x is nonnegative. */
4148 (for floors (FLOOR)
4149 truncs (TRUNC)
4150 (simplify
4151 (floors tree_expr_nonnegative_p@0)
4152 (truncs @0))))
4153
4154 (match double_value_p
4155 @0
4156 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4157 (for froms (BUILT_IN_TRUNCL
4158 BUILT_IN_FLOORL
4159 BUILT_IN_CEILL
4160 BUILT_IN_ROUNDL
4161 BUILT_IN_NEARBYINTL
4162 BUILT_IN_RINTL)
4163 tos (BUILT_IN_TRUNC
4164 BUILT_IN_FLOOR
4165 BUILT_IN_CEIL
4166 BUILT_IN_ROUND
4167 BUILT_IN_NEARBYINT
4168 BUILT_IN_RINT)
4169 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4170 (if (optimize && canonicalize_math_p ())
4171 (simplify
4172 (froms (convert double_value_p@0))
4173 (convert (tos @0)))))
4174
4175 (match float_value_p
4176 @0
4177 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4178 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4179 BUILT_IN_FLOORL BUILT_IN_FLOOR
4180 BUILT_IN_CEILL BUILT_IN_CEIL
4181 BUILT_IN_ROUNDL BUILT_IN_ROUND
4182 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4183 BUILT_IN_RINTL BUILT_IN_RINT)
4184 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4185 BUILT_IN_FLOORF BUILT_IN_FLOORF
4186 BUILT_IN_CEILF BUILT_IN_CEILF
4187 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4188 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4189 BUILT_IN_RINTF BUILT_IN_RINTF)
4190 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
4191 if x is a float. */
4192 (if (optimize && canonicalize_math_p ()
4193 && targetm.libc_has_function (function_c99_misc))
4194 (simplify
4195 (froms (convert float_value_p@0))
4196 (convert (tos @0)))))
4197
4198 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
4199 tos (XFLOOR XCEIL XROUND XRINT)
4200 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4201 (if (optimize && canonicalize_math_p ())
4202 (simplify
4203 (froms (convert double_value_p@0))
4204 (tos @0))))
4205
4206 (for froms (XFLOORL XCEILL XROUNDL XRINTL
4207 XFLOOR XCEIL XROUND XRINT)
4208 tos (XFLOORF XCEILF XROUNDF XRINTF)
4209 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
4210 if x is a float. */
4211 (if (optimize && canonicalize_math_p ())
4212 (simplify
4213 (froms (convert float_value_p@0))
4214 (tos @0))))
4215
4216 (if (canonicalize_math_p ())
4217 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4218 (for floors (IFLOOR LFLOOR LLFLOOR)
4219 (simplify
4220 (floors tree_expr_nonnegative_p@0)
4221 (fix_trunc @0))))
4222
4223 (if (canonicalize_math_p ())
4224 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4225 (for fns (IFLOOR LFLOOR LLFLOOR
4226 ICEIL LCEIL LLCEIL
4227 IROUND LROUND LLROUND)
4228 (simplify
4229 (fns integer_valued_real_p@0)
4230 (fix_trunc @0)))
4231 (if (!flag_errno_math)
4232 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4233 (for rints (IRINT LRINT LLRINT)
4234 (simplify
4235 (rints integer_valued_real_p@0)
4236 (fix_trunc @0)))))
4237
4238 (if (canonicalize_math_p ())
4239 (for ifn (IFLOOR ICEIL IROUND IRINT)
4240 lfn (LFLOOR LCEIL LROUND LRINT)
4241 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4242 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4243 sizeof (int) == sizeof (long). */
4244 (if (TYPE_PRECISION (integer_type_node)
4245 == TYPE_PRECISION (long_integer_type_node))
4246 (simplify
4247 (ifn @0)
4248 (lfn:long_integer_type_node @0)))
4249 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4250 sizeof (long long) == sizeof (long). */
4251 (if (TYPE_PRECISION (long_long_integer_type_node)
4252 == TYPE_PRECISION (long_integer_type_node))
4253 (simplify
4254 (llfn @0)
4255 (lfn:long_integer_type_node @0)))))
4256
4257 /* cproj(x) -> x if we're ignoring infinities. */
4258 (simplify
4259 (CPROJ @0)
4260 (if (!HONOR_INFINITIES (type))
4261 @0))
4262
4263 /* If the real part is inf and the imag part is known to be
4264 nonnegative, return (inf + 0i). */
4265 (simplify
4266 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
4267 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
4268 { build_complex_inf (type, false); }))
4269
4270 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
4271 (simplify
4272 (CPROJ (complex @0 REAL_CST@1))
4273 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
4274 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4275
4276 (for pows (POW)
4277 sqrts (SQRT)
4278 cbrts (CBRT)
4279 (simplify
4280 (pows @0 REAL_CST@1)
4281 (with {
4282 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
4283 REAL_VALUE_TYPE tmp;
4284 }
4285 (switch
4286 /* pow(x,0) -> 1. */
4287 (if (real_equal (value, &dconst0))
4288 { build_real (type, dconst1); })
4289 /* pow(x,1) -> x. */
4290 (if (real_equal (value, &dconst1))
4291 @0)
4292 /* pow(x,-1) -> 1/x. */
4293 (if (real_equal (value, &dconstm1))
4294 (rdiv { build_real (type, dconst1); } @0))
4295 /* pow(x,0.5) -> sqrt(x). */
4296 (if (flag_unsafe_math_optimizations
4297 && canonicalize_math_p ()
4298 && real_equal (value, &dconsthalf))
4299 (sqrts @0))
4300 /* pow(x,1/3) -> cbrt(x). */
4301 (if (flag_unsafe_math_optimizations
4302 && canonicalize_math_p ()
4303 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4304 real_equal (value, &tmp)))
4305 (cbrts @0))))))
4306
4307 /* powi(1,x) -> 1. */
4308 (simplify
4309 (POWI real_onep@0 @1)
4310 @0)
4311
4312 (simplify
4313 (POWI @0 INTEGER_CST@1)
4314 (switch
4315 /* powi(x,0) -> 1. */
4316 (if (wi::to_wide (@1) == 0)
4317 { build_real (type, dconst1); })
4318 /* powi(x,1) -> x. */
4319 (if (wi::to_wide (@1) == 1)
4320 @0)
4321 /* powi(x,-1) -> 1/x. */
4322 (if (wi::to_wide (@1) == -1)
4323 (rdiv { build_real (type, dconst1); } @0))))
4324
4325 /* Narrowing of arithmetic and logical operations.
4326
4327 These are conceptually similar to the transformations performed for
4328 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4329 term we want to move all that code out of the front-ends into here. */
4330
4331 /* If we have a narrowing conversion of an arithmetic operation where
4332 both operands are widening conversions from the same type as the outer
4333 narrowing conversion, then convert the innermost operands to a suitable
4334 unsigned type (to avoid introducing undefined behavior), perform the
4335 operation, and convert the result to the desired type. */
4336 (for op (plus minus)
4337 (simplify
4338 (convert (op:s (convert@2 @0) (convert?@3 @1)))
4339 (if (INTEGRAL_TYPE_P (type)
4340 /* We check for type compatibility between @0 and @1 below,
4341 so there's no need to check that @1/@3 are integral types. */
4342 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4343 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4344 /* The precision of the type of each operand must match the
4345 precision of the mode of each operand, similarly for the
4346 result. */
4347 && type_has_mode_precision_p (TREE_TYPE (@0))
4348 && type_has_mode_precision_p (TREE_TYPE (@1))
4349 && type_has_mode_precision_p (type)
4350 /* The inner conversion must be a widening conversion. */
4351 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4352 && types_match (@0, type)
4353 && (types_match (@0, @1)
4354 /* Or the second operand is a constant integer, or a constant
4355 integer converted by valueize. */
4356 || TREE_CODE (@1) == INTEGER_CST))
4357 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4358 (op @0 (convert @1))
4359 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4360 (convert (op (convert:utype @0)
4361 (convert:utype @1))))))))
4362
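/* A sketch of the narrowing, assuming 16-bit short and 32-bit int:
   the C front-end widens

     short f (short a, short b) { return a + b; }

   to (short)((int) a + (int) b), and since signed short arithmetic
   may not wrap, the rule rewrites it via the unsigned type as
   (short)((unsigned short) a + (unsigned short) b), keeping the
   addition in the narrow mode without undefined overflow.  */
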
4363 /* This is another case of narrowing, specifically when there's an outer
4364 BIT_AND_EXPR which masks off bits outside the type of the innermost
4365 operands. Like the previous case we have to convert the operands
4366 to unsigned types to avoid introducing undefined behavior for the
4367 arithmetic operation. */
4368 (for op (minus plus)
4369 (simplify
4370 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4371 (if (INTEGRAL_TYPE_P (type)
4372 /* We check for type compatibility between @0 and @1 below,
4373 so there's no need to check that @1/@3 are integral types. */
4374 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4375 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4376 /* The precision of the type of each operand must match the
4377 precision of the mode of each operand, similarly for the
4378 result. */
4379 && type_has_mode_precision_p (TREE_TYPE (@0))
4380 && type_has_mode_precision_p (TREE_TYPE (@1))
4381 && type_has_mode_precision_p (type)
4382 /* The inner conversion must be a widening conversion. */
4383 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4384 && types_match (@0, @1)
4385 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4386 <= TYPE_PRECISION (TREE_TYPE (@0)))
4387 && (wi::to_wide (@4)
4388 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4389 true, TYPE_PRECISION (type))) == 0)
4390 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4391 (with { tree ntype = TREE_TYPE (@0); }
4392 (convert (bit_and (op @0 @1) (convert:ntype @4))))
4393 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4394 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
4395 (convert:utype @4))))))))
4396
4397 /* Transform (@0 < @1 and @0 < @2) to use min,
4398 (@0 > @1 and @0 > @2) to use max */
4399 (for op (lt le gt ge)
4400 ext (min min max max)
4401 (simplify
4402 (bit_and (op:cs @0 @1) (op:cs @0 @2))
4403 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4404 && TREE_CODE (@0) != INTEGER_CST)
4405 (op @0 (ext @1 @2)))))
4406
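/* An example of the min/max combination, once the && has been lowered
   to a bit_and on boolean values:

     int f (int x, int a, int b) { return x < a && x < b; }

   becomes "x < MIN_EXPR (a, b)"; the INTEGER_CST guard above keeps a
   constant common operand out of the transform.  */
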
4407 (simplify
4408 /* signbit(x) -> 0 if x is nonnegative. */
4409 (SIGNBIT tree_expr_nonnegative_p@0)
4410 { integer_zero_node; })
4411
4412 (simplify
4413 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
4414 (SIGNBIT @0)
4415 (if (!HONOR_SIGNED_ZEROS (@0))
4416 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
4417
4418 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
4419 (for cmp (eq ne)
4420 (for op (plus minus)
4421 rop (minus plus)
4422 (simplify
4423 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4424 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4425 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
4426 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
4427 && !TYPE_SATURATING (TREE_TYPE (@0)))
4428 (with { tree res = int_const_binop (rop, @2, @1); }
4429 (if (TREE_OVERFLOW (res)
4430 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4431 { constant_boolean_node (cmp == NE_EXPR, type); }
4432 (if (single_use (@3))
4433 (cmp @0 { TREE_OVERFLOW (res)
4434 ? drop_tree_overflow (res) : res; }))))))))
4435 (for cmp (lt le gt ge)
4436 (for op (plus minus)
4437 rop (minus plus)
4438 (simplify
4439 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4440 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4441 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4442 (with { tree res = int_const_binop (rop, @2, @1); }
4443 (if (TREE_OVERFLOW (res))
4444 {
4445 fold_overflow_warning (("assuming signed overflow does not occur "
4446 "when simplifying conditional to constant"),
4447 WARN_STRICT_OVERFLOW_CONDITIONAL);
4448 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
4449 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
4450 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
4451 TYPE_SIGN (TREE_TYPE (@1)))
4452 != (op == MINUS_EXPR);
4453 constant_boolean_node (less == ovf_high, type);
4454 }
4455 (if (single_use (@3))
4456 (with
4457 {
4458 fold_overflow_warning (("assuming signed overflow does not occur "
4459 "when changing X +- C1 cmp C2 to "
4460 "X cmp C2 -+ C1"),
4461 WARN_STRICT_OVERFLOW_COMPARISON);
4462 }
4463 (cmp @0 { res; })))))))))
4464
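/* A worked case of X +- C1 CMP C2, for signed int x:

     int f (int x) { return x + 5 == 7; }

   res = 7 - 5 = 2 with no overflow, so this becomes "x == 2"; when
   C2 -+ C1 overflows and signed overflow is undefined, the eq/ne
   form folds directly to a constant instead.  */
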
4465 /* Canonicalizations of BIT_FIELD_REFs. */
4466
4467 (simplify
4468 (BIT_FIELD_REF @0 @1 @2)
4469 (switch
4470 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
4471 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4472 (switch
4473 (if (integer_zerop (@2))
4474 (view_convert (realpart @0)))
4475 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4476 (view_convert (imagpart @0)))))
4477 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4478 && INTEGRAL_TYPE_P (type)
4479 /* On GIMPLE this should only apply to register arguments. */
4480 && (! GIMPLE || is_gimple_reg (@0))
4481 /* A bit-field-ref that references the full argument can be stripped. */
4482 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
4483 && integer_zerop (@2))
4484 /* Low-parts can be reduced to integral conversions.
4485 ??? The following doesn't work for PDP endian. */
4486 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
4487 /* Don't even think about BITS_BIG_ENDIAN. */
4488 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
4489 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
4490 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
4491 ? (TYPE_PRECISION (TREE_TYPE (@0))
4492 - TYPE_PRECISION (type))
4493 : 0)) == 0)))
4494 (convert @0))))
4495
4496 /* Simplify vector extracts. */
4497
4498 (simplify
4499 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
4500 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
4501 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
4502 || (VECTOR_TYPE_P (type)
4503 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
4504 (with
4505 {
4506 tree ctor = (TREE_CODE (@0) == SSA_NAME
4507 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
4508 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
4509 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
4510 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
4511 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
4512 }
4513 (if (n != 0
4514 && (idx % width) == 0
4515 && (n % width) == 0
4516 && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
4517 (with
4518 {
4519 idx = idx / width;
4520 n = n / width;
4521 /* Constructor elements can be subvectors. */
4522 unsigned HOST_WIDE_INT k = 1;
4523 if (CONSTRUCTOR_NELTS (ctor) != 0)
4524 {
4525 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
4526 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
4527 k = TYPE_VECTOR_SUBPARTS (cons_elem);
4528 }
4529 }
4530 (switch
4531 /* We keep an exact subset of the constructor elements. */
4532 (if ((idx % k) == 0 && (n % k) == 0)
4533 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4534 { build_constructor (type, NULL); }
4535 (with
4536 {
4537 idx /= k;
4538 n /= k;
4539 }
4540 (if (n == 1)
4541 (if (idx < CONSTRUCTOR_NELTS (ctor))
4542 { CONSTRUCTOR_ELT (ctor, idx)->value; }
4543 { build_zero_cst (type); })
4544 {
4545 vec<constructor_elt, va_gc> *vals;
4546 vec_alloc (vals, n);
4547 for (unsigned i = 0;
4548 i < n && idx + i < CONSTRUCTOR_NELTS (ctor); ++i)
4549 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
4550 CONSTRUCTOR_ELT (ctor, idx + i)->value);
4551 build_constructor (type, vals);
4552 }))))
4553 /* The bitfield references a single constructor element. */
4554 (if (idx + n <= (idx / k + 1) * k)
4555 (switch
4556 (if (CONSTRUCTOR_NELTS (ctor) <= idx / k)
4557 { build_zero_cst (type); })
4558 (if (n == k)
4559 { CONSTRUCTOR_ELT (ctor, idx / k)->value; })
4560 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / k)->value; }
4561 @1 { bitsize_int ((idx % k) * width); })))))))))
4562
4563 /* Simplify a bit extraction from a bit insertion, for the cases where
4564 the inserted element fully covers the extraction or the insertion
4565 does not touch the extraction. */
4566 (simplify
4567 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
4568 (with
4569 {
4570 unsigned HOST_WIDE_INT isize;
4571 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4572 isize = TYPE_PRECISION (TREE_TYPE (@1));
4573 else
4574 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
4575 }
4576 (switch
4577 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
4578 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
4579 wi::to_wide (@ipos) + isize))
4580 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
4581 wi::to_wide (@rpos)
4582 - wi::to_wide (@ipos)); }))
4583 (if (wi::geu_p (wi::to_wide (@ipos),
4584 wi::to_wide (@rpos) + wi::to_wide (@rsize))
4585 || wi::geu_p (wi::to_wide (@rpos),
4586 wi::to_wide (@ipos) + isize))
4587 (BIT_FIELD_REF @0 @rsize @rpos)))))