/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2019 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   initializer_each_zero_or_onep
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   uniform_integer_cst_p
   HONOR_NANS)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison         lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)

/* Binary operations and their associated IFN_COND_* function.  */
(define_operator_list UNCOND_BINARY
  plus minus
  mult trunc_div trunc_mod rdiv
  min max
  bit_and bit_ior bit_xor)
(define_operator_list COND_BINARY
  IFN_COND_ADD IFN_COND_SUB
  IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
  IFN_COND_MIN IFN_COND_MAX
  IFN_COND_AND IFN_COND_IOR IFN_COND_XOR)

/* Same for ternary operations.  */
(define_operator_list UNCOND_TERNARY
  IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
(define_operator_list COND_TERNARY
  IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)

/* As opposed to convert?, this still creates a single pattern, so
   it is not a suitable replacement for convert? in all cases.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
                   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others.  */
(match (nop_convert @0)
 @0)

/* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
   ABSU_EXPR returns the unsigned absolute value of its operand, and the
   operand of the ABSU_EXPR will have the corresponding signed type.  */
(simplify (abs (convert @0))
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && element_precision (type) > element_precision (TREE_TYPE (@0)))
  (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
   (convert (absu:utype @0)))))
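
/* Illustration (an assumed example, not itself a pattern): given

     int f (short x) { return __builtin_abs ((int) x); }

   the matched ABS_EXPR <(int) x> folds to (int) ABSU_EXPR <x>, with the
   ABSU_EXPR computed in 'unsigned short', since the result precision
   (int) exceeds the operand precision (short).  */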

/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
  (simplify
    (op @0 integer_zerop)
    (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Even if the fold_real_zero_addition_p can't simplify X + 0.0
   into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
   or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
   if not -frounding-math.  For sNaNs the first operation would raise
   exceptions but turn the result into a qNaN, so the second operation
   would not raise it.  */
(for inner_op (plus minus)
 (for outer_op (plus minus)
  (simplify
   (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
   (if (real_zerop (@1)
        && real_zerop (@2)
        && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
    (with { bool inner_plus = ((inner_op == PLUS_EXPR)
                               ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
            bool outer_plus
              = ((outer_op == PLUS_EXPR)
                 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
     (if (outer_plus && !inner_plus)
      (outer_op @0 @2)
      @3))))))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
  (if (!HONOR_SNANS (type)
       && (!HONOR_SIGNED_ZEROS (type)
           || !COMPLEX_FLOAT_TYPE_P (type)))
   (negate @0)))

/* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 }.  */
(simplify
 (mult SSA_NAME@1 SSA_NAME@2)
 (if (INTEGRAL_TYPE_P (type)
      && get_nonzero_bits (@1) == 1
      && get_nonzero_bits (@2) == 1)
  (bit_and @1 @2)))
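
/* Illustration (an assumed example, not itself a pattern): in GIMPLE,
   when both factors are SSA names whose nonzero bits are known to be 1,
   as in

     int f (int a, int b) { return (a & 1) * (b & 1); }

   the multiplication of the two masked values can fold to a bitwise
   AND of them, avoiding a multiply.  */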

/* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...},
   unless the target has native support for the former but not the latter.  */
(simplify
 (mult @0 VECTOR_CST@1)
 (if (initializer_each_zero_or_onep (@1)
      && !HONOR_SNANS (type)
      && !HONOR_SIGNED_ZEROS (type))
  (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
   (if (itype
        && (!VECTOR_MODE_P (TYPE_MODE (type))
            || (VECTOR_MODE_P (TYPE_MODE (itype))
                && optab_handler (and_optab,
                                  TYPE_MODE (itype)) != CODE_FOR_nothing)))
    (view_convert (bit_and:itype (view_convert @0)
                                 (ne @1 { build_zero_cst (type); })))))))

(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 (simplify
  (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outp @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outp @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outp @0))))))
 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 (simplify
  (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outn @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outn @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outn @0)))))))

/* Transform X * copysign (1.0, X) into abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep @0))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
(simplify
 (COPYSIGN_ALL REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN_ALL (negate @0) @1)))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
  (simplify
    (op @0 integer_onep)
    (non_lvalue @0)))

/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / (1 << B)) != -1 >> B.  */
(simplify
 (trunc_div @0 (lshift integer_onep@1 @2))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
  (rshift @0 @2)))
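
/* Illustration (an assumed example, not itself a pattern): for an
   unsigned dividend,

     unsigned f (unsigned a, int b) { return a / (1u << b); }

   folds to a >> b.  For a signed negative dividend this would round
   toward negative infinity instead of toward zero, hence the
   nonnegativity requirement above.  */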

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
         { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    wi::overflow_type overflow;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                            TYPE_SIGN (type), &overflow);
   }
   (if (div == EXACT_DIV_EXPR
        || optimize_successive_divisions_p (@2, @3))
    (if (!overflow)
     (div @0 { wide_int_to_tree (type, mul); })
     (if (TYPE_UNSIGNED (type)
          || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
      { build_zero_cst (type); }))))))
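
/* Illustration (an assumed example, not itself a pattern): when the
   combination is allowed, e.g.

     unsigned f (unsigned x) { return (x / 3) / 5; }

   the two divisions fold to x / 15, provided 3 * 5 does not overflow
   the type; if the product overflows, an unsigned result is known to
   be zero.  */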

/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   wi::overflow_type overflow;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                           TYPE_SIGN (type), &overflow);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ! HONOR_NANS (type)
       && ! HONOR_INFINITIES (type))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C).  */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C.  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

(if (flag_unsafe_math_optimizations)
 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
    Since C / x may underflow to zero, do this only for unsafe math.  */
 (for op (lt le gt ge)
      neg_op (gt ge lt le)
  (simplify
   (op (rdiv REAL_CST@0 @1) real_zerop@2)
   (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
    (switch
     (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
      (op @1 @2))
     /* For C < 0, use the inverted operator.  */
     (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
      (neg_op @1 @2)))))))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A).  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
           { build_int_cst (integer_type_node,
                            wi::exact_log2 (wi::to_wide (@2))); }))))
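
/* Illustration (an assumed example, not itself a pattern): masking with
   the negated divisor first makes the division exact, so

     unsigned f (unsigned x) { return (x & -16u) / 16u; }

   folds to x >> 4, since -16 satisfies @1 + @2 == 0 and 16 is a
   positive power of two.  */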

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
                             TYPE_SIGN (type)))
   { build_zero_cst (type); }))
 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
    modulo and comparison, since it is simpler and equivalent.  */
 (for cmp (eq ne)
  (simplify
   (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
   (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
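
/* Illustration (an assumed example, not itself a pattern): for signed x,

     int f (int x) { return x % 4 == 0; }

   is rewritten to test (unsigned) x % 4u == 0u; the two forms agree in
   two's complement, and the unsigned remainder by a power of two needs
   no sign fixups.  */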

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
  (if (TYPE_SIGN (type) == SIGNED
       && !TREE_OVERFLOW (@1)
       && wi::neg_p (wi::to_wide (@1))
       && !TYPE_OVERFLOW_TRAPS (type)
       /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
       && !sign_bit_p (@1, @1))
   (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
         Y might be -1, because we would then change valid
         INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
          || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
                                                   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
        || tree_expr_nonnegative_p (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@3))
       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
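
/* Illustration (assumed examples, not themselves patterns): for an
   unsigned or provably nonnegative left operand,

     unsigned f (unsigned x)        { return x % 8; }

   folds to x & 7, and

     unsigned g (unsigned x, int n) { return x % (4u << n); }

   folds to x & ((4u << n) - 1), via the power_of_two_cand helper
   above.  */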

/* Simplify (unsigned t * 2) / 2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
                (type, wi::mask (TYPE_PRECISION (type)
                                 - wi::exact_log2 (wi::to_wide (@1)),
                                 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* Convert absu(x)*absu(x) -> x*x.  */
(simplify
 (mult (absu@1 @0) @1)
 (mult (convert@2 @0) @2))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b).  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
  (if (! FLOAT_TYPE_P (type)
       && tree_nop_conversion_p (type, TREE_TYPE (@1)))
   (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x). */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
  (if (INTEGRAL_TYPE_P (type)
       && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
       && single_use (@1))
   (if (TYPE_UNSIGNED (type))
    (rshift (lshift @0 @2) @3)
    (with
     { tree utype = unsigned_type_for (type); }
     (convert (rshift (lshift (convert:utype @0) @2) @3))))))
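
/* Illustration (an assumed example, not itself a pattern): with 32-bit
   unsigned int,

     unsigned f (unsigned x) { return 1u << (31 - x); }

   folds to (1u << 31) >> x, turning the variable subtraction into a
   shift of a constant; for signed types the shifts are carried out in
   the corresponding unsigned type first.  */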

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
  (if (flag_associative_math
       && single_use (@3))
   (with
    { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
    (if (tem)
     (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b).  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
        && INTEGRAL_TYPE_P (TREE_TYPE (@1))
        && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
    (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
        && INTEGRAL_TYPE_P (TREE_TYPE (@1))
        && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
    (cmp (bit_and @0 (convert @1)) @2))))
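
/* Illustration (an assumed example, not itself a pattern): once
   gimplified to a non-short-circuit AND, a test such as

     int f (int x, int y) { return x == 0 && y == 0; }

   folds to (x | y) == 0, and likewise x != 0 || y != 0 folds to
   (x | y) != 0, provided both operands have the same precision.  */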

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
  (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
  (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
   (bit_xor @0 @1))
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* (a & ~b) | (a ^ b)  -->  a ^ b  */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a  -->  ~(a & b)  */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (a | b) & ~(a ^ b)  -->  a & b  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b)  -->  a | ~b  */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b)  -->  a | b  */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b)  -->  a | ~b  */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* ~(~a | b) --> a & ~b */
(simplify
 (bit_not (bit_ior:cs (bit_not @0) @1))
 (bit_and @0 (bit_not @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
   ((A & N) + B) & M -> (A + B) & M
   Similarly if (N & M) == 0,
   ((A | N) + B) & M -> (A + B) & M
   and for - instead of + (or unary - instead of +)
   and/or ^ instead of |.
   If B is constant and (B & M) == 0, fold into A & M.  */
(for op (plus minus)
 (for bitop (bit_and bit_ior bit_xor)
  (simplify
   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
    (with
     { tree pmop[2];
       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
                                       @3, @4, @1, ERROR_MARK, NULL_TREE,
                                       NULL_TREE, pmop); }
     (if (utype)
      (convert (bit_and (op (convert:utype { pmop[0]; })
                            (convert:utype { pmop[1]; }))
                        (convert:utype @2))))))
  (simplify
   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
    (with
     { tree pmop[2];
       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                       NULL_TREE, NULL_TREE, @1, bitop, @3,
                                       @4, pmop); }
     (if (utype)
      (convert (bit_and (op (convert:utype { pmop[0]; })
                            (convert:utype { pmop[1]; }))
                        (convert:utype @2)))))))
 (simplify
  (bit_and (op:s @0 @1) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, @1, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2)))))))
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
                                      bitop, @2, @3, NULL_TREE, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (negate (convert:utype { pmop[0]; }))
                       (convert:utype @1)))))))

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
  (bit_xor @0 @0)
  { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
  (bit_xor @0 integer_all_onesp@1)
  (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))
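
/* Illustration (an assumed example, not itself a pattern): rounding up
   to the next even value,

     int f (int x) { return x + (x & 1); }

   folds to (x + 1) & ~1; the two forms agree for every x in two's
   complement.  */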

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (~x & y) | ~(x | y) -> ~x */
(simplify
 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
 @2)

/* (x | y) ^ (x | ~y) -> ~x */
(simplify
 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
 (bit_not @0))

/* (x & y) | ~(x | y) -> ~(x ^ y) */
(simplify
 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x ^ y) -> x | ~y */
(simplify
 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
 (bit_ior @0 (bit_not @1)))

/* (x ^ y) | ~(x | y) -> ~(x & y) */
(simplify
 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_and @0 @1)))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* (~x | y) & (x | ~y) -> ~(x ^ y) */
(simplify
 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x | ~y) -> x ^ y */
(simplify
 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
 (bit_xor @0 @1))

/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
   (bit_ior (convert @4) (convert @5)))))

/* (X | Y) ^ X -> Y & ~X  */
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:cs @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))

/* Convert abs (abs (X)) into abs (X).
   Also absu (absu (X)) into absu (X).  */
(simplify
 (abs (abs@1 @0))
 @1)

(simplify
 (absu (convert@2 (absu@1 @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
  @1))

/* Convert abs[u] (-X) -> abs[u] (X).  */
(simplify
 (abs (negate @0))
 (abs @0))

(simplify
 (absu (negate @0))
 (absu @0))

/* Convert abs[u] (X) where X is nonnegative -> (X).  */
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

(simplify
 (absu tree_expr_nonnegative_p@0)
 (convert @0))

/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
 (minus @0 @1)
 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))))

/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@1)))
   (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* -(A - B) -> B - A.  */
(simplify
 (negate (minus @0 @1))
 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))
  (minus @1 @0)))
(simplify
 (negate (pointer_diff @0 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (type))
  (pointer_diff @1 @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
 (plus @0 (negate @1))))

/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constants (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || !type_has_mode_precision_p (type)))
   (convert (bitop @0 (convert @1))))))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
  (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (if (!CONSTANT_CLASS_P (@0))
   /* This is the canonical form regardless of whether (bitop @1 @2) can be
      folded to a constant.  */
   (bitop @0 (bitop @1 @2))
   /* In this case we have three constants and (bitop @0 @1) doesn't fold
      to a constant.  This can happen if @0 or @1 is a POLY_INT_CST and if
      the values involved are such that the operation can't be decided at
      compile time.  Try folding one of @0 or @1 with @2 to see whether
      that combination can be decided at compile time.

      Keep the existing form if both folds fail, to avoid endless
      oscillation.  */
   (with { tree cst1 = const_binop (bitop, type, @0, @2); }
    (if (cst1)
     (bitop @1 { cst1; })
     (with { tree cst2 = const_binop (bitop, type, @1, @2); }
      (if (cst2)
       (bitop @0 { cst2; }))))))))
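
/* Illustration (an assumed example, not itself a pattern): successive
   masks with constants fold through the reassociation above,

     int f (int x) { return (x & 0xf0) & 0x3c; }

   becoming x & 0x30, and similarly for | and ^.  */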

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
  (truth_not @0))

(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true.  */
(for op (eq ne)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))

/* ~~x -> x */
(simplify
  (bit_not (bit_not @0))
  @0)

/* Convert ~ (-A) to A - 1.  */
(simplify
 (bit_not (convert? (negate @0)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))

/* Convert - (~A) to A + 1.  */
(simplify
 (negate (nop_convert (bit_not @0)))
 (plus (view_convert @0) { build_each_one_cst (type); }))

/* Convert ~ (A - 1) or ~ (A + -1) to -A.  */
(simplify
 (bit_not (convert? (minus @0 integer_each_onep)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))
(simplify
 (bit_not (convert? (plus @0 integer_all_onesp)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))

/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify.  */
(simplify
 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 (bit_not @1)))))
(simplify
 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 @1))))

/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical.  */
(simplify
 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_not (bit_xor (view_convert @0) @1))))

/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))

/* Fold A - (A & B) into ~B & A.  */
(simplify
 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (convert (bit_and (bit_not @1) @0))))

/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0  */
(for cmp (gt lt ge le)
 (simplify
  (mult (convert (cmp @0 @1)) @2)
  (cond (cmp @0 @1) @2 { build_zero_cst (type); })))

/* For integral types with undefined overflow and C != 0 fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && tree_expr_nonzero_p (@1))
   (cmp @0 @2))))

/* For integral types with wrapping overflow and C odd fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && (TREE_INT_CST_LOW (@1) & 1) != 0)
   (cmp @0 @2))))

/* For integral types with undefined overflow and C != 0 fold
   x * C RELOP y * C into:

   x RELOP y for nonnegative C
   y RELOP x for negative C  */
(for cmp (lt gt le ge)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
    (cmp @0 @2)
    (if (TREE_CODE (@1) == INTEGER_CST
         && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
     (cmp @2 @0))))))

/* (X - 1U) <= INT_MAX-1U into (int) X > 0.  */
(for cmp (le gt)
     icmp (gt le)
 (simplify
  (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
        && TYPE_UNSIGNED (TREE_TYPE (@0))
        && TYPE_PRECISION (TREE_TYPE (@0)) > 1
        && (wi::to_wide (@2)
            == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
    (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
     (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
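
/* Illustration (an assumed example, not itself a pattern): with 32-bit
   int and unsigned,

     int f (unsigned x) { return x - 1u <= 0x7ffffffeu; }

   tests X - 1U against INT_MAX - 1U and folds to (int) x > 0, which is
   true exactly for x in [1, INT_MAX].  */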
1502
1503 /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1504 (for cmp (simple_comparison)
1505 (simplify
1506 (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2)))
1507 (if (element_precision (@3) >= element_precision (@0)
1508 && types_match (@0, @1))
1509 (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
1510 (if (!TYPE_UNSIGNED (TREE_TYPE (@3)))
1511 (cmp @1 @0)
1512 (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1))
1513 (with
1514 {
1515 tree utype = unsigned_type_for (TREE_TYPE (@0));
1516 }
1517 (cmp (convert:utype @1) (convert:utype @0)))))
1518 (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2))))
1519 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3)))
1520 (cmp @0 @1)
1521 (with
1522 {
1523 tree utype = unsigned_type_for (TREE_TYPE (@0));
1524 }
1525 (cmp (convert:utype @0) (convert:utype @1)))))))))
1526
1527 /* X / C1 op C2 into a simple range test. */
1528 (for cmp (simple_comparison)
1529 (simplify
1530 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1531 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1532 && integer_nonzerop (@1)
1533 && !TREE_OVERFLOW (@1)
1534 && !TREE_OVERFLOW (@2))
1535 (with { tree lo, hi; bool neg_overflow;
1536 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1537 &neg_overflow); }
1538 (switch
1539 (if (code == LT_EXPR || code == GE_EXPR)
1540 (if (TREE_OVERFLOW (lo))
1541 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1542 (if (code == LT_EXPR)
1543 (lt @0 { lo; })
1544 (ge @0 { lo; }))))
1545 (if (code == LE_EXPR || code == GT_EXPR)
1546 (if (TREE_OVERFLOW (hi))
1547 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1548 (if (code == LE_EXPR)
1549 (le @0 { hi; })
1550 (gt @0 { hi; }))))
1551 (if (!lo && !hi)
1552 { build_int_cst (type, code == NE_EXPR); })
1553 (if (code == EQ_EXPR && !hi)
1554 (ge @0 { lo; }))
1555 (if (code == EQ_EXPR && !lo)
1556 (le @0 { hi; }))
1557 (if (code == NE_EXPR && !hi)
1558 (lt @0 { lo; }))
1559 (if (code == NE_EXPR && !lo)
1560 (gt @0 { hi; }))
1561 (if (GENERIC)
1562 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1563 lo, hi); })
1564 (with
1565 {
1566 tree etype = range_check_type (TREE_TYPE (@0));
1567 if (etype)
1568 {
1569 if (! TYPE_UNSIGNED (etype))
1570 etype = unsigned_type_for (etype);
1571 hi = fold_convert (etype, hi);
1572 lo = fold_convert (etype, lo);
1573 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1574 }
1575 }
1576 (if (etype && hi && !TREE_OVERFLOW (hi))
1577 (if (code == EQ_EXPR)
1578 (le (minus (convert:etype @0) { lo; }) { hi; })
1579 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
1580
1581 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1582 (for op (lt le ge gt)
1583 (simplify
1584 (op (plus:c @0 @2) (plus:c @1 @2))
1585 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1586 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1587 (op @0 @1))))
1588 /* For equality and subtraction, this is also true with wrapping overflow. */
1589 (for op (eq ne minus)
1590 (simplify
1591 (op (plus:c @0 @2) (plus:c @1 @2))
1592 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1593 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1594 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1595 (op @0 @1))))
1596
1597 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1598 (for op (lt le ge gt)
1599 (simplify
1600 (op (minus @0 @2) (minus @1 @2))
1601 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1602 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1603 (op @0 @1))))
1604 /* For equality and subtraction, this is also true with wrapping overflow. */
1605 (for op (eq ne minus)
1606 (simplify
1607 (op (minus @0 @2) (minus @1 @2))
1608 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1609 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1610 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1611 (op @0 @1))))
1612 /* And for pointers... */
1613 (for op (simple_comparison)
1614 (simplify
1615 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1616 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1617 (op @0 @1))))
1618 (simplify
1619 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1620 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1621 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1622 (pointer_diff @0 @1)))
1623
1624 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1625 (for op (lt le ge gt)
1626 (simplify
1627 (op (minus @2 @0) (minus @2 @1))
1628 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1629 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1630 (op @1 @0))))
1631 /* For equality and subtraction, this is also true with wrapping overflow. */
1632 (for op (eq ne minus)
1633 (simplify
1634 (op (minus @2 @0) (minus @2 @1))
1635 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1636 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1637 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1638 (op @1 @0))))
1639 /* And for pointers... */
1640 (for op (simple_comparison)
1641 (simplify
1642 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1643 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1644 (op @1 @0))))
1645 (simplify
1646 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1647 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1648 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1649 (pointer_diff @1 @0)))
1650
1651 /* X + Y < Y is the same as X < 0 when there is no overflow. */
1652 (for op (lt le gt ge)
1653 (simplify
1654 (op:c (plus:c@2 @0 @1) @1)
1655 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1656 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1657 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
1658 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1659 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1660 /* For equality, this is also true with wrapping overflow. */
1661 (for op (eq ne)
1662 (simplify
1663 (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1664 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1665 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1666 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1667 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1668 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1669 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1670 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1671 (simplify
1672 (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1673 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1674 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1675 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1676 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1677
1678 /* X - Y < X is the same as Y > 0 when there is no overflow.
1679 For equality, this is also true with wrapping overflow. */
1680 (for op (simple_comparison)
1681 (simplify
1682 (op:c @0 (minus@2 @0 @1))
1683 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1684 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1685 || ((op == EQ_EXPR || op == NE_EXPR)
1686 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1687 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1688 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1689
1690 /* Transform:
1691 (X / Y) == 0 -> X < Y if X, Y are unsigned.
1692 (X / Y) != 0 -> X >= Y if X, Y are unsigned. */
1693 (for cmp (eq ne)
1694 ocmp (lt ge)
1695 (simplify
1696 (cmp (trunc_div @0 @1) integer_zerop)
1697 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
1698 /* Complex ==/!= is allowed, but not </>=. */
1699 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
1700 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
1701 (ocmp @0 @1))))
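/* For illustration (invented names), with unsigned scalar operands as
   required above:

     int f (unsigned x, unsigned y) { return (x / y) == 0; }
     // folds to: x < y

   since an unsigned division x / y is zero exactly when x < y.  */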
1702
1703 /* X == C - X can never be true if C is odd. */
1704 (for cmp (eq ne)
1705 (simplify
1706 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1707 (if (TREE_INT_CST_LOW (@1) & 1)
1708 { constant_boolean_node (cmp == NE_EXPR, type); })))
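/* For illustration: x == C - x would need 2*x == C, which has no
   solution modulo a power of two when C is odd, so e.g.

     int f (int x) { return x == 7 - x; }
     // folds to: 0  */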
1709
1710 /* Arguments on which one can call get_nonzero_bits to get the bits
1711 possibly set. */
1712 (match with_possible_nonzero_bits
1713 INTEGER_CST@0)
1714 (match with_possible_nonzero_bits
1715 SSA_NAME@0
1716 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1717 /* Slightly extended version, do not make it recursive to keep it cheap. */
1718 (match (with_possible_nonzero_bits2 @0)
1719 with_possible_nonzero_bits@0)
1720 (match (with_possible_nonzero_bits2 @0)
1721 (bit_and:c with_possible_nonzero_bits@0 @2))
1722
1723 /* Same for bits that are known to be set, but we do not have
1724 an equivalent to get_nonzero_bits yet. */
1725 (match (with_certain_nonzero_bits2 @0)
1726 INTEGER_CST@0)
1727 (match (with_certain_nonzero_bits2 @0)
1728 (bit_ior @1 INTEGER_CST@0))
1729
1730 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1731 (for cmp (eq ne)
1732 (simplify
1733 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1734 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
1735 { constant_boolean_node (cmp == NE_EXPR, type); })))
1736
1737 /* ((X inner_op C0) outer_op C1)
1738 With X being a tree where value_range has reasoned certain bits to always be
1739 zero throughout its computed value range,
1740 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
1741 where zero_mask has 1's for all bits that are sure to be 0 in X
1742 and 0's otherwise.
1743 if (inner_op == '^') C0 &= ~C1;
1744 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
1745 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
1746 */
1747 (for inner_op (bit_ior bit_xor)
1748 outer_op (bit_xor bit_ior)
1749 (simplify
1750 (outer_op
1751 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1752 (with
1753 {
1754 bool fail = false;
1755 wide_int zero_mask_not;
1756 wide_int C0;
1757 wide_int cst_emit;
1758
1759 if (TREE_CODE (@2) == SSA_NAME)
1760 zero_mask_not = get_nonzero_bits (@2);
1761 else
1762 fail = true;
1763
1764 if (inner_op == BIT_XOR_EXPR)
1765 {
1766 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1767 cst_emit = C0 | wi::to_wide (@1);
1768 }
1769 else
1770 {
1771 C0 = wi::to_wide (@0);
1772 cst_emit = C0 ^ wi::to_wide (@1);
1773 }
1774 }
1775 (if (!fail && (C0 & zero_mask_not) == 0)
1776 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1777 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
1778 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
1779
1780 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1781 (simplify
1782 (pointer_plus (pointer_plus:s @0 @1) @3)
1783 (pointer_plus @0 (plus @1 @3)))
1784
1785 /* Pattern match
1786 tem1 = (long) ptr1;
1787 tem2 = (long) ptr2;
1788 tem3 = tem2 - tem1;
1789 tem4 = (unsigned long) tem3;
1790 tem5 = ptr1 + tem4;
1791 and produce
1792 tem5 = ptr2; */
1793 (simplify
1794 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1795 /* Conditionally look through a sign-changing conversion. */
1796 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1797 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1798 || (GENERIC && type == TREE_TYPE (@1))))
1799 @1))
1800 (simplify
1801 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
1802 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
1803 (convert @1)))
1804
1805 /* Pattern match
1806 tem = (sizetype) ptr;
1807 tem = tem & algn;
1808 tem = -tem;
1809 ... = ptr p+ tem;
1810 and produce the simpler form, which is easier to analyze with respect to alignment
1811 ... = ptr & ~algn; */
1812 (simplify
1813 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1814 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
1815 (bit_and @0 { algn; })))
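/* For illustration, the usual round-down-to-alignment idiom (invented
   names, assuming uintptr_t has the precision of a pointer):

     char *f (char *p)
     {
       uintptr_t rem = (uintptr_t) p & 15;   // bits below the alignment
       return p - rem;                       // ptr p+ -rem
     }
     // folds to the equivalent of: (char *) ((uintptr_t) p & ~15)  */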
1816
1817 /* Try folding difference of addresses. */
1818 (simplify
1819 (minus (convert ADDR_EXPR@0) (convert @1))
1820 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1821 (with { poly_int64 diff; }
1822 (if (ptr_difference_const (@0, @1, &diff))
1823 { build_int_cst_type (type, diff); }))))
1824 (simplify
1825 (minus (convert @0) (convert ADDR_EXPR@1))
1826 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1827 (with { poly_int64 diff; }
1828 (if (ptr_difference_const (@0, @1, &diff))
1829 { build_int_cst_type (type, diff); }))))
1830 (simplify
1831 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1832 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1833 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1834 (with { poly_int64 diff; }
1835 (if (ptr_difference_const (@0, @1, &diff))
1836 { build_int_cst_type (type, diff); }))))
1837 (simplify
1838 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1839 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1840 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1841 (with { poly_int64 diff; }
1842 (if (ptr_difference_const (@0, @1, &diff))
1843 { build_int_cst_type (type, diff); }))))
1844
1845 /* If arg0 is derived from the address of an object or function, we may
1846 be able to fold this expression using the object or function's
1847 alignment. */
1848 (simplify
1849 (bit_and (convert? @0) INTEGER_CST@1)
1850 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1851 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1852 (with
1853 {
1854 unsigned int align;
1855 unsigned HOST_WIDE_INT bitpos;
1856 get_pointer_alignment_1 (@0, &align, &bitpos);
1857 }
1858 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1859 { wide_int_to_tree (type, (wi::to_wide (@1)
1860 & (bitpos / BITS_PER_UNIT))); }))))
1861
1862
1863 /* We can't reassociate at all for saturating types. */
1864 (if (!TYPE_SATURATING (type))
1865
1866 /* Contract negates. */
1867 /* A + (-B) -> A - B */
1868 (simplify
1869 (plus:c @0 (convert? (negate @1)))
1870 /* Apply STRIP_NOPS on the negate. */
1871 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1872 && !TYPE_OVERFLOW_SANITIZED (type))
1873 (with
1874 {
1875 tree t1 = type;
1876 if (INTEGRAL_TYPE_P (type)
1877 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1878 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1879 }
1880 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
1881 /* A - (-B) -> A + B */
1882 (simplify
1883 (minus @0 (convert? (negate @1)))
1884 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1885 && !TYPE_OVERFLOW_SANITIZED (type))
1886 (with
1887 {
1888 tree t1 = type;
1889 if (INTEGRAL_TYPE_P (type)
1890 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1891 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1892 }
1893 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
1894 /* -(T)(-A) -> (T)A
1895 Sign-extension is ok except for INT_MIN, which thankfully cannot
1896 happen without overflow. */
1897 (simplify
1898 (negate (convert (negate @1)))
1899 (if (INTEGRAL_TYPE_P (type)
1900 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
1901 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
1902 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1903 && !TYPE_OVERFLOW_SANITIZED (type)
1904 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1905 (convert @1)))
1906 (simplify
1907 (negate (convert negate_expr_p@1))
1908 (if (SCALAR_FLOAT_TYPE_P (type)
1909 && ((DECIMAL_FLOAT_TYPE_P (type)
1910 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
1911 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
1912 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
1913 (convert (negate @1))))
1914 (simplify
1915 (negate (nop_convert (negate @1)))
1916 (if (!TYPE_OVERFLOW_SANITIZED (type)
1917 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1918 (view_convert @1)))
1919
1920 /* We can't reassociate floating-point unless -fassociative-math is
1921 given, nor fixed-point plus or minus, because of saturation to +-Inf. */
1922 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1923 && !FIXED_POINT_TYPE_P (type))
1924
1925 /* Match patterns that allow contracting a plus-minus pair
1926 irrespective of overflow issues. */
1927 /* (A +- B) - A -> +- B */
1928 /* (A +- B) -+ B -> A */
1929 /* A - (A +- B) -> -+ B */
1930 /* A +- (B -+ A) -> +- B */
1931 (simplify
1932 (minus (plus:c @0 @1) @0)
1933 @1)
1934 (simplify
1935 (minus (minus @0 @1) @0)
1936 (negate @1))
1937 (simplify
1938 (plus:c (minus @0 @1) @1)
1939 @0)
1940 (simplify
1941 (minus @0 (plus:c @0 @1))
1942 (negate @1))
1943 (simplify
1944 (minus @0 (minus @0 @1))
1945 @1)
1946 /* (A +- B) + (C - A) -> C +- B */
1947 /* (A + B) - (A - C) -> B + C */
1948 /* More cases are handled with comparisons. */
1949 (simplify
1950 (plus:c (plus:c @0 @1) (minus @2 @0))
1951 (plus @2 @1))
1952 (simplify
1953 (plus:c (minus @0 @1) (minus @2 @0))
1954 (minus @2 @1))
1955 (simplify
1956 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
1957 (if (TYPE_OVERFLOW_UNDEFINED (type)
1958 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
1959 (pointer_diff @2 @1)))
1960 (simplify
1961 (minus (plus:c @0 @1) (minus @0 @2))
1962 (plus @1 @2))
1963
1964 /* (A +- CST1) +- CST2 -> A + CST3
1965 Use view_convert because it is safe for vectors and equivalent for
1966 scalars. */
1967 (for outer_op (plus minus)
1968 (for inner_op (plus minus)
1969 neg_inner_op (minus plus)
1970 (simplify
1971 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1972 CONSTANT_CLASS_P@2)
1973 /* If one of the types wraps, use that one. */
1974 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
1975 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
1976 forever if something doesn't simplify into a constant. */
1977 (if (!CONSTANT_CLASS_P (@0))
1978 (if (outer_op == PLUS_EXPR)
1979 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1980 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
1981 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1982 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1983 (if (outer_op == PLUS_EXPR)
1984 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1985 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1986 /* If the constant operation overflows we cannot do the transform
1987 directly as we would introduce undefined overflow, for example
1988 with (a - 1) + INT_MIN. */
1989 (if (types_match (type, @0))
1990 (with { tree cst = const_binop (outer_op == inner_op
1991 ? PLUS_EXPR : MINUS_EXPR,
1992 type, @1, @2); }
1993 (if (cst && !TREE_OVERFLOW (cst))
1994 (inner_op @0 { cst; } )
1995 /* X+INT_MAX+1 is X-INT_MIN. */
1996 (if (INTEGRAL_TYPE_P (type) && cst
1997 && wi::to_wide (cst) == wi::min_value (type))
1998 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
1999 /* Last resort, use some unsigned type. */
2000 (with { tree utype = unsigned_type_for (type); }
2001 (if (utype)
2002 (view_convert (inner_op
2003 (view_convert:utype @0)
2004 (view_convert:utype
2005 { drop_tree_overflow (cst); }))))))))))))))
2006
2007 /* (CST1 - A) +- CST2 -> CST3 - A */
2008 (for outer_op (plus minus)
2009 (simplify
2010 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
2011 (with { tree cst = const_binop (outer_op, type, @1, @2); }
2012 (if (cst && !TREE_OVERFLOW (cst))
2013 (minus { cst; } @0)))))
2014
2015 /* CST1 - (CST2 - A) -> CST3 + A */
2016 (simplify
2017 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
2018 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
2019 (if (cst && !TREE_OVERFLOW (cst))
2020 (plus { cst; } @0))))
2021
2022 /* ~A + A -> -1 */
2023 (simplify
2024 (plus:c (bit_not @0) @0)
2025 (if (!TYPE_OVERFLOW_TRAPS (type))
2026 { build_all_ones_cst (type); }))
2027
2028 /* ~A + 1 -> -A */
2029 (simplify
2030 (plus (convert? (bit_not @0)) integer_each_onep)
2031 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2032 (negate (convert @0))))
2033
2034 /* -A - 1 -> ~A */
2035 (simplify
2036 (minus (convert? (negate @0)) integer_each_onep)
2037 (if (!TYPE_OVERFLOW_TRAPS (type)
2038 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
2039 (bit_not (convert @0))))
2040
2041 /* -1 - A -> ~A */
2042 (simplify
2043 (minus integer_all_onesp @0)
2044 (bit_not @0))
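/* For illustration of the four folds above (invented names; the first
   requires a non-trapping type):

     int f (int a) { return ~a + a; }   // folds to: -1
     int g (int a) { return ~a + 1; }   // folds to: -a
     int h (int a) { return -a - 1; }   // folds to: ~a
     int i (int a) { return -1 - a; }   // folds to: ~a  */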
2045
2046 /* (T)(P + A) - (T)P -> (T) A */
2047 (simplify
2048 (minus (convert (plus:c @@0 @1))
2049 (convert? @0))
2050 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2051 /* For integer types, if A has a smaller type
2052 than T the result depends on the possible
2053 overflow in P + A.
2054 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2055 However, if an overflow in P + A would cause
2056 undefined behavior, we can assume that there
2057 is no overflow. */
2058 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2059 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2060 (convert @1)))
2061 (simplify
2062 (minus (convert (pointer_plus @@0 @1))
2063 (convert @0))
2064 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2065 /* For pointer types, if the conversion of A to the
2066 final type requires a sign- or zero-extension,
2067 then we have to punt - it is not defined which
2068 one is correct. */
2069 || (POINTER_TYPE_P (TREE_TYPE (@0))
2070 && TREE_CODE (@1) == INTEGER_CST
2071 && tree_int_cst_sign_bit (@1) == 0))
2072 (convert @1)))
2073 (simplify
2074 (pointer_diff (pointer_plus @@0 @1) @0)
2075 /* The second argument of pointer_plus must be interpreted as signed, and
2076 thus sign-extended if necessary. */
2077 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2078 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2079 second arg is unsigned even when we need to consider it as signed,
2080 we don't want to diagnose overflow here. */
2081 (convert (view_convert:stype @1))))
2082
2083 /* (T)P - (T)(P + A) -> -(T) A */
2084 (simplify
2085 (minus (convert? @0)
2086 (convert (plus:c @@0 @1)))
2087 (if (INTEGRAL_TYPE_P (type)
2088 && TYPE_OVERFLOW_UNDEFINED (type)
2089 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2090 (with { tree utype = unsigned_type_for (type); }
2091 (convert (negate (convert:utype @1))))
2092 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2093 /* For integer types, if A has a smaller type
2094 than T the result depends on the possible
2095 overflow in P + A.
2096 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2097 However, if an overflow in P + A would cause
2098 undefined behavior, we can assume that there
2099 is no overflow. */
2100 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2101 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2102 (negate (convert @1)))))
2103 (simplify
2104 (minus (convert @0)
2105 (convert (pointer_plus @@0 @1)))
2106 (if (INTEGRAL_TYPE_P (type)
2107 && TYPE_OVERFLOW_UNDEFINED (type)
2108 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2109 (with { tree utype = unsigned_type_for (type); }
2110 (convert (negate (convert:utype @1))))
2111 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2112 /* For pointer types, if the conversion of A to the
2113 final type requires a sign- or zero-extension,
2114 then we have to punt - it is not defined which
2115 one is correct. */
2116 || (POINTER_TYPE_P (TREE_TYPE (@0))
2117 && TREE_CODE (@1) == INTEGER_CST
2118 && tree_int_cst_sign_bit (@1) == 0))
2119 (negate (convert @1)))))
2120 (simplify
2121 (pointer_diff @0 (pointer_plus @@0 @1))
2122 /* The second argument of pointer_plus must be interpreted as signed, and
2123 thus sign-extended if necessary. */
2124 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2125 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2126 second arg is unsigned even when we need to consider it as signed,
2127 we don't want to diagnose overflow here. */
2128 (negate (convert (view_convert:stype @1)))))
2129
2130 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
2131 (simplify
2132 (minus (convert (plus:c @@0 @1))
2133 (convert (plus:c @0 @2)))
2134 (if (INTEGRAL_TYPE_P (type)
2135 && TYPE_OVERFLOW_UNDEFINED (type)
2136 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2137 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
2138 (with { tree utype = unsigned_type_for (type); }
2139 (convert (minus (convert:utype @1) (convert:utype @2))))
2140 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2141 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2142 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2143 /* For integer types, if A has a smaller type
2144 than T the result depends on the possible
2145 overflow in P + A.
2146 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2147 However, if an overflow in P + A would cause
2148 undefined behavior, we can assume that there
2149 is no overflow. */
2150 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2151 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2152 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2153 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
2154 (minus (convert @1) (convert @2)))))
2155 (simplify
2156 (minus (convert (pointer_plus @@0 @1))
2157 (convert (pointer_plus @0 @2)))
2158 (if (INTEGRAL_TYPE_P (type)
2159 && TYPE_OVERFLOW_UNDEFINED (type)
2160 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2161 (with { tree utype = unsigned_type_for (type); }
2162 (convert (minus (convert:utype @1) (convert:utype @2))))
2163 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2164 /* For pointer types, if the conversion of A to the
2165 final type requires a sign- or zero-extension,
2166 then we have to punt - it is not defined which
2167 one is correct. */
2168 || (POINTER_TYPE_P (TREE_TYPE (@0))
2169 && TREE_CODE (@1) == INTEGER_CST
2170 && tree_int_cst_sign_bit (@1) == 0
2171 && TREE_CODE (@2) == INTEGER_CST
2172 && tree_int_cst_sign_bit (@2) == 0))
2173 (minus (convert @1) (convert @2)))))
2174 (simplify
2175 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2176 /* The second argument of pointer_plus must be interpreted as signed, and
2177 thus sign-extended if necessary. */
2178 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2179 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2180 second arg is unsigned even when we need to consider it as signed,
2181 we don't want to diagnose overflow here. */
2182 (minus (convert (view_convert:stype @1))
2183 (convert (view_convert:stype @2)))))))
2184
2185 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2186 Modeled after fold_plusminus_mult_expr. */
2187 (if (!TYPE_SATURATING (type)
2188 && (!FLOAT_TYPE_P (type) || flag_associative_math))
2189 (for plusminus (plus minus)
2190 (simplify
2191 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2192 (if ((!ANY_INTEGRAL_TYPE_P (type)
2193 || TYPE_OVERFLOW_WRAPS (type)
2194 || (INTEGRAL_TYPE_P (type)
2195 && tree_expr_nonzero_p (@0)
2196 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2197 /* If @1 +- @2 is constant, require a hard single-use on at least
2198 one of the original operands (but not necessarily on both). */
2199 && (single_use (@3) || single_use (@4)))
2200 (mult (plusminus @1 @2) @0)))
2201 /* We cannot generate constant 1 for fract. */
2202 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2203 (simplify
2204 (plusminus @0 (mult:c@3 @0 @2))
2205 (if ((!ANY_INTEGRAL_TYPE_P (type)
2206 || TYPE_OVERFLOW_WRAPS (type)
2207 || (INTEGRAL_TYPE_P (type)
2208 && tree_expr_nonzero_p (@0)
2209 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2210 && single_use (@3))
2211 (mult (plusminus { build_one_cst (type); } @2) @0)))
2212 (simplify
2213 (plusminus (mult:c@3 @0 @2) @0)
2214 (if ((!ANY_INTEGRAL_TYPE_P (type)
2215 || TYPE_OVERFLOW_WRAPS (type)
2216 || (INTEGRAL_TYPE_P (type)
2217 && tree_expr_nonzero_p (@0)
2218 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2219 && single_use (@3))
2220 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
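/* For illustration (invented names), using an unsigned type so that
   wrapping overflow makes the distribution unconditionally safe:

     unsigned f (unsigned a, unsigned b, unsigned c)
     { return a * c + b * c; }   // folds to: (a + b) * c
     unsigned g (unsigned a, unsigned c)
     { return a * c + a; }       // folds to: a * (c + 1)  */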
2221
2222 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
2223
2224 (for minmax (min max FMIN_ALL FMAX_ALL)
2225 (simplify
2226 (minmax @0 @0)
2227 @0))
2228 /* min(max(x,y),y) -> y. */
2229 (simplify
2230 (min:c (max:c @0 @1) @1)
2231 @1)
2232 /* max(min(x,y),y) -> y. */
2233 (simplify
2234 (max:c (min:c @0 @1) @1)
2235 @1)
2236 /* max(a,-a) -> abs(a). */
2237 (simplify
2238 (max:c @0 (negate @0))
2239 (if (TREE_CODE (type) != COMPLEX_TYPE
2240 && (! ANY_INTEGRAL_TYPE_P (type)
2241 || TYPE_OVERFLOW_UNDEFINED (type)))
2242 (abs @0)))
2243 /* min(a,-a) -> -abs(a). */
2244 (simplify
2245 (min:c @0 (negate @0))
2246 (if (TREE_CODE (type) != COMPLEX_TYPE
2247 && (! ANY_INTEGRAL_TYPE_P (type)
2248 || TYPE_OVERFLOW_UNDEFINED (type)))
2249 (negate (abs @0))))
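/* For illustration (invented names; undefined signed overflow is
   assumed so the negate cannot hit INT_MIN):

     int f (int a) { return a > -a ? a : -a; }   // MAX (a, -a)
     // folds to: abs (a)
     int g (int a) { return a < -a ? a : -a; }   // MIN (a, -a)
     // folds to: -abs (a)  */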
2250 (simplify
2251 (min @0 @1)
2252 (switch
2253 (if (INTEGRAL_TYPE_P (type)
2254 && TYPE_MIN_VALUE (type)
2255 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2256 @1)
2257 (if (INTEGRAL_TYPE_P (type)
2258 && TYPE_MAX_VALUE (type)
2259 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2260 @0)))
2261 (simplify
2262 (max @0 @1)
2263 (switch
2264 (if (INTEGRAL_TYPE_P (type)
2265 && TYPE_MAX_VALUE (type)
2266 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2267 @1)
2268 (if (INTEGRAL_TYPE_P (type)
2269 && TYPE_MIN_VALUE (type)
2270 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2271 @0)))
2272
2273 /* max (a, a + CST) -> a + CST where CST is positive. */
2274 /* max (a, a + CST) -> a where CST is negative. */
2275 (simplify
2276 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2277 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2278 (if (tree_int_cst_sgn (@1) > 0)
2279 @2
2280 @0)))
2281
2282 /* min (a, a + CST) -> a where CST is positive. */
2283 /* min (a, a + CST) -> a + CST where CST is negative. */
2284 (simplify
2285 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2286 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2287 (if (tree_int_cst_sgn (@1) > 0)
2288 @0
2289 @2)))
2290
2291 /* (convert (minmax (convert x) c)) -> (minmax x c) if x is promoted
2292 and the outer convert demotes the expression back to x's type. */
2293 (for minmax (min max)
2294 (simplify
2295 (convert (minmax@0 (convert @1) INTEGER_CST@2))
2296 (if (INTEGRAL_TYPE_P (type)
2297 && types_match (@1, type) && int_fits_type_p (@2, type)
2298 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2299 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2300 (minmax @1 (convert @2)))))
2301
2302 (for minmax (FMIN_ALL FMAX_ALL)
2303 /* If either argument is NaN, return the other one. Avoid the
2304 transformation if we get (and honor) a signalling NaN. */
2305 (simplify
2306 (minmax:c @0 REAL_CST@1)
2307 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2308 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2309 @0)))
2310 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2311 functions to return the numeric arg if the other one is NaN.
2312 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2313 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2314 worry about it either. */
2315 (if (flag_finite_math_only)
2316 (simplify
2317 (FMIN_ALL @0 @1)
2318 (min @0 @1))
2319 (simplify
2320 (FMAX_ALL @0 @1)
2321 (max @0 @1)))
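/* For illustration: with -ffinite-math-only,

     double f (double x, double y) { return __builtin_fmin (x, y); }

   may become a plain MIN_EXPR, because fmin's obligation to return
   the numeric argument when the other one is NaN is vacuous once
   NaNs are assumed absent.  */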
2322 /* min (-A, -B) -> -max (A, B) */
2323 (for minmax (min max FMIN_ALL FMAX_ALL)
2324 maxmin (max min FMAX_ALL FMIN_ALL)
2325 (simplify
2326 (minmax (negate:s@2 @0) (negate:s@3 @1))
2327 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2328 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2329 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2330 (negate (maxmin @0 @1)))))
2331 /* MIN (~X, ~Y) -> ~MAX (X, Y)
2332 MAX (~X, ~Y) -> ~MIN (X, Y) */
2333 (for minmax (min max)
2334 maxmin (max min)
2335 (simplify
2336 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2337 (bit_not (maxmin @0 @1))))
2338
2339 /* MIN (X, Y) == X -> X <= Y */
2340 (for minmax (min min max max)
2341 cmp (eq ne eq ne )
2342 out (le gt ge lt )
2343 (simplify
2344 (cmp:c (minmax:c @0 @1) @0)
2345 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2346 (out @0 @1))))
2347 /* MIN (X, 5) == 0 -> X == 0
2348 MIN (X, 5) == 7 -> false */
2349 (for cmp (eq ne)
2350 (simplify
2351 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2352 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2353 TYPE_SIGN (TREE_TYPE (@0))))
2354 { constant_boolean_node (cmp == NE_EXPR, type); }
2355 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2356 TYPE_SIGN (TREE_TYPE (@0))))
2357 (cmp @0 @2)))))
2358 (for cmp (eq ne)
2359 (simplify
2360 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2361 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2362 TYPE_SIGN (TREE_TYPE (@0))))
2363 { constant_boolean_node (cmp == NE_EXPR, type); }
2364 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2365 TYPE_SIGN (TREE_TYPE (@0))))
2366 (cmp @0 @2)))))
2367 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2368 (for minmax (min min max max min min max max )
2369 cmp (lt le gt ge gt ge lt le )
2370 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2371 (simplify
2372 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2373 (comb (cmp @0 @2) (cmp @1 @2))))
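/* For illustration (invented names):

     int f (int x) { return (x < 5 ? x : 5) < 3; }   // MIN (x, 5) < 3
     // becomes (x < 3) | (5 < 3), which constant-folds to: x < 3  */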
2374
2375 /* Simplifications of shift and rotates. */
2376
2377 (for rotate (lrotate rrotate)
2378 (simplify
2379 (rotate integer_all_onesp@0 @1)
2380 @0))
2381
2382 /* Optimize -1 >> x for arithmetic right shifts. */
2383 (simplify
2384 (rshift integer_all_onesp@0 @1)
2385 (if (!TYPE_UNSIGNED (type)
2386 && tree_expr_nonnegative_p (@1))
2387 @0))
2388
2389 /* Optimize (x >> c) << c into x & (-1<<c). */
2390 (simplify
2391 (lshift (rshift @0 INTEGER_CST@1) @1)
2392 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2393 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
2394
2395 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2396 types. */
2397 (simplify
2398 (rshift (lshift @0 INTEGER_CST@1) @1)
2399 (if (TYPE_UNSIGNED (type)
2400 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2401 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
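/* For illustration, assuming a 32-bit unsigned int:

     unsigned f (unsigned x) { return (x >> 4) << 4; }
     // folds to: x & 0xfffffff0   (x & (-1 << 4))
     unsigned g (unsigned x) { return (x << 4) >> 4; }
     // folds to: x & 0x0fffffff   (x & ((unsigned) -1 >> 4))  */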
2402
2403 (for shiftrotate (lrotate rrotate lshift rshift)
2404 (simplify
2405 (shiftrotate @0 integer_zerop)
2406 (non_lvalue @0))
2407 (simplify
2408 (shiftrotate integer_zerop@0 @1)
2409 @0)
2410 /* Prefer vector1 << scalar to vector1 << vector2
2411 if vector2 is uniform. */
2412 (for vec (VECTOR_CST CONSTRUCTOR)
2413 (simplify
2414 (shiftrotate @0 vec@1)
2415 (with { tree tem = uniform_vector_p (@1); }
2416 (if (tem)
2417 (shiftrotate @0 { tem; }))))))
2418
2419 /* Simplify X << Y to X when the low `width' bits of Y are known zero,
2420 as the only valid shift amount is then Y == 0. Similarly for X >> Y. */
2421 #if GIMPLE
2422 (for shift (lshift rshift)
2423 (simplify
2424 (shift @0 SSA_NAME@1)
2425 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2426 (with {
2427 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2428 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2429 }
2430 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2431 @0)))))
2432 #endif
2433
2434 /* Rewrite an LROTATE_EXPR by a constant into an
2435 RROTATE_EXPR by a new constant. */
2436 (simplify
2437 (lrotate @0 INTEGER_CST@1)
2438 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2439 build_int_cst (TREE_TYPE (@1),
2440 element_precision (type)), @1); }))
2441
2442 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2443 (for op (lrotate rrotate rshift lshift)
2444 (simplify
2445 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2446 (with { unsigned int prec = element_precision (type); }
2447 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2448 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2449 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2450 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2451 (with { unsigned int low = (tree_to_uhwi (@1)
2452 + tree_to_uhwi (@2)); }
2453 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2454 being well defined. */
2455 (if (low >= prec)
2456 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2457 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2458 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2459 { build_zero_cst (type); }
2460 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2461 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
2462
2463
2464 /* ((1 << A) & 1) != 0 -> A == 0
2465 ((1 << A) & 1) == 0 -> A != 0 */
2466 (for cmp (ne eq)
2467 icmp (eq ne)
2468 (simplify
2469 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2470 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2471
2472 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2473 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2474 if CST2 != 0. */
2475 (for cmp (ne eq)
2476 (simplify
2477 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2478 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2479 (if (cand < 0
2480 || (!integer_zerop (@2)
2481 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2482 { constant_boolean_node (cmp == NE_EXPR, type); }
2483 (if (!integer_zerop (@2)
2484 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2485 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
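/* For illustration: ctz (64) - ctz (4) == 6 - 2 == 4, so (invented
   name)

     int f (unsigned a) { return (4u << a) == 64; }
     // folds to: a == 4

   while (4u << a) == 65 folds to constant false, as no shift of 4
   can produce an odd value.  */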
2486
2487 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2488 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2489 if the new mask might be further optimized. */
2490 (for shift (lshift rshift)
2491 (simplify
2492 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2493 INTEGER_CST@2)
2494 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2495 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2496 && tree_fits_uhwi_p (@1)
2497 && tree_to_uhwi (@1) > 0
2498 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2499 (with
2500 {
2501 unsigned int shiftc = tree_to_uhwi (@1);
2502 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2503 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2504 tree shift_type = TREE_TYPE (@3);
2505 unsigned int prec;
2506
2507 if (shift == LSHIFT_EXPR)
2508 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2509 else if (shift == RSHIFT_EXPR
2510 && type_has_mode_precision_p (shift_type))
2511 {
2512 prec = TYPE_PRECISION (TREE_TYPE (@3));
2513 tree arg00 = @0;
2514 /* See if more bits can be proven as zero because of
2515 zero extension. */
2516 if (@3 != @0
2517 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2518 {
2519 tree inner_type = TREE_TYPE (@0);
2520 if (type_has_mode_precision_p (inner_type)
2521 && TYPE_PRECISION (inner_type) < prec)
2522 {
2523 prec = TYPE_PRECISION (inner_type);
2524 /* See if we can shorten the right shift. */
2525 if (shiftc < prec)
2526 shift_type = inner_type;
2527 /* Otherwise X >> C1 is all zeros, so we'll optimize
2528 it into (X, 0) later on by making sure zerobits
2529 is all ones. */
2530 }
2531 }
2532 zerobits = HOST_WIDE_INT_M1U;
2533 if (shiftc < prec)
2534 {
2535 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2536 zerobits <<= prec - shiftc;
2537 }
2538 /* For an arithmetic shift, if the sign bit could be set, zerobits
2539 can actually contain sign bits, so no transformation is
2540 possible, unless MASK masks them all away. In that
2541 case the shift needs to be converted into logical shift. */
2542 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2543 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2544 {
2545 if ((mask & zerobits) == 0)
2546 shift_type = unsigned_type_for (TREE_TYPE (@3));
2547 else
2548 zerobits = 0;
2549 }
2550 }
2551 }
2552 /* ((X << 16) & 0xff00) is (X, 0). */
2553 (if ((mask & zerobits) == mask)
2554 { build_int_cst (type, 0); }
2555 (with { newmask = mask | zerobits; }
2556 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2557 (with
2558 {
2559 /* Only do the transformation if NEWMASK is some integer
2560 mode's mask. */
2561 for (prec = BITS_PER_UNIT;
2562 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
2563 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
2564 break;
2565 }
2566 (if (prec < HOST_BITS_PER_WIDE_INT
2567 || newmask == HOST_WIDE_INT_M1U)
2568 (with
2569 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2570 (if (!tree_int_cst_equal (newmaskt, @2))
2571 (if (shift_type != TREE_TYPE (@3))
2572 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2573 (bit_and @4 { newmaskt; })))))))))))))
2574
2575 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2576 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
2577 (for shift (lshift rshift)
2578 (for bit_op (bit_and bit_xor bit_ior)
2579 (simplify
2580 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2581 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2582 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2583 (bit_op (shift (convert @0) @1) { mask; }))))))
2584
2585 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2586 (simplify
2587 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2588 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2589 && (element_precision (TREE_TYPE (@0))
2590 <= element_precision (TREE_TYPE (@1))
2591 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2592 (with
2593 { tree shift_type = TREE_TYPE (@0); }
2594 (convert (rshift (convert:shift_type @1) @2)))))
2595
2596 /* ~(~X >>r Y) -> X >>r Y
2597 ~(~X <<r Y) -> X <<r Y */
2598 (for rotate (lrotate rrotate)
2599 (simplify
2600 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2601 (if ((element_precision (TREE_TYPE (@0))
2602 <= element_precision (TREE_TYPE (@1))
2603 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2604 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2605 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2606 (with
2607 { tree rotate_type = TREE_TYPE (@0); }
2608 (convert (rotate (convert:rotate_type @1) @2))))))
2609
2610 /* Simplifications of conversions. */
2611
2612 /* Basic strip-useless-type-conversions / strip_nops. */
2613 (for cvt (convert view_convert float fix_trunc)
2614 (simplify
2615 (cvt @0)
2616 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2617 || (GENERIC && type == TREE_TYPE (@0)))
2618 @0)))
2619
2620 /* Contract view-conversions. */
2621 (simplify
2622 (view_convert (view_convert @0))
2623 (view_convert @0))
2624
2625 /* For integral conversions with the same precision or pointer
2626 conversions use a NOP_EXPR instead. */
2627 (simplify
2628 (view_convert @0)
2629 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2630 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2631 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2632 (convert @0)))
2633
2634 /* Strip inner integral conversions that do not change precision or size, or
2635 zero-extend while keeping the same size (for bool-to-char). */
2636 (simplify
2637 (view_convert (convert@0 @1))
2638 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2639 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2640 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2641 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2642 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2643 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
2644 (view_convert @1)))
2645
2646 /* Simplify a view-converted empty constructor. */
2647 (simplify
2648 (view_convert CONSTRUCTOR@0)
2649 (if (TREE_CODE (@0) != SSA_NAME
2650 && CONSTRUCTOR_NELTS (@0) == 0)
2651 { build_zero_cst (type); }))
2652
2653 /* Re-association barriers around constants and other re-association
2654 barriers can be removed. */
2655 (simplify
2656 (paren CONSTANT_CLASS_P@0)
2657 @0)
2658 (simplify
2659 (paren (paren@1 @0))
2660 @1)
2661
2662 /* Handle cases of two conversions in a row. */
2663 (for ocvt (convert float fix_trunc)
2664 (for icvt (convert float)
2665 (simplify
2666 (ocvt (icvt@1 @0))
2667 (with
2668 {
2669 tree inside_type = TREE_TYPE (@0);
2670 tree inter_type = TREE_TYPE (@1);
2671 int inside_int = INTEGRAL_TYPE_P (inside_type);
2672 int inside_ptr = POINTER_TYPE_P (inside_type);
2673 int inside_float = FLOAT_TYPE_P (inside_type);
2674 int inside_vec = VECTOR_TYPE_P (inside_type);
2675 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2676 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2677 int inter_int = INTEGRAL_TYPE_P (inter_type);
2678 int inter_ptr = POINTER_TYPE_P (inter_type);
2679 int inter_float = FLOAT_TYPE_P (inter_type);
2680 int inter_vec = VECTOR_TYPE_P (inter_type);
2681 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2682 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2683 int final_int = INTEGRAL_TYPE_P (type);
2684 int final_ptr = POINTER_TYPE_P (type);
2685 int final_float = FLOAT_TYPE_P (type);
2686 int final_vec = VECTOR_TYPE_P (type);
2687 unsigned int final_prec = TYPE_PRECISION (type);
2688 int final_unsignedp = TYPE_UNSIGNED (type);
2689 }
2690 (switch
2691 /* In addition to the cases of two conversions in a row
2692 handled below, if we are converting something to its own
2693 type via an object of identical or wider precision, neither
2694 conversion is needed. */
2695 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2696 || (GENERIC
2697 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2698 && (((inter_int || inter_ptr) && final_int)
2699 || (inter_float && final_float))
2700 && inter_prec >= final_prec)
2701 (ocvt @0))
2702
2703 /* Likewise, if the intermediate and initial types are either both
2704 float or both integer, we don't need the middle conversion if the
2705 former is wider than the latter and doesn't change the signedness
2706 (for integers). Avoid this if the final type is a pointer since
2707 then we sometimes need the middle conversion. */
2708 (if (((inter_int && inside_int) || (inter_float && inside_float))
2709 && (final_int || final_float)
2710 && inter_prec >= inside_prec
2711 && (inter_float || inter_unsignedp == inside_unsignedp))
2712 (ocvt @0))
2713
2714 /* If we have a sign-extension of a zero-extended value, we can
2715 replace that by a single zero-extension. Likewise if the
2716 final conversion does not change precision we can drop the
2717 intermediate conversion. */
2718 (if (inside_int && inter_int && final_int
2719 && ((inside_prec < inter_prec && inter_prec < final_prec
2720 && inside_unsignedp && !inter_unsignedp)
2721 || final_prec == inter_prec))
2722 (ocvt @0))
2723
2724 /* Two conversions in a row are not needed unless:
2725 - some conversion is floating-point (overstrict for now), or
2726 - some conversion is a vector (overstrict for now), or
2727 - the intermediate type is narrower than both initial and
2728 final, or
2729 - the intermediate type and innermost type differ in signedness,
2730 and the outermost type is wider than the intermediate, or
2731 - the initial type is a pointer type and the precisions of the
2732 intermediate and final types differ, or
2733 - the final type is a pointer type and the precisions of the
2734 initial and intermediate types differ. */
2735 (if (! inside_float && ! inter_float && ! final_float
2736 && ! inside_vec && ! inter_vec && ! final_vec
2737 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2738 && ! (inside_int && inter_int
2739 && inter_unsignedp != inside_unsignedp
2740 && inter_prec < final_prec)
2741 && ((inter_unsignedp && inter_prec > inside_prec)
2742 == (final_unsignedp && final_prec > inter_prec))
2743 && ! (inside_ptr && inter_prec != final_prec)
2744 && ! (final_ptr && inside_prec != inter_prec))
2745 (ocvt @0))
2746
2747 /* A truncation to an unsigned type (a zero-extension) should be
2748 canonicalized as bitwise and of a mask. */
2749 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2750 && final_int && inter_int && inside_int
2751 && final_prec == inside_prec
2752 && final_prec > inter_prec
2753 && inter_unsignedp)
2754 (convert (bit_and @0 { wide_int_to_tree
2755 (inside_type,
2756 wi::mask (inter_prec, false,
2757 TYPE_PRECISION (inside_type))); })))
2758
2759 /* If we are converting an integer to a floating-point that can
2760 represent it exactly and back to an integer, we can skip the
2761 floating-point conversion. */
2762 (if (GIMPLE /* PR66211 */
2763 && inside_int && inter_float && final_int &&
2764 (unsigned) significand_size (TYPE_MODE (inter_type))
2765 >= inside_prec - !inside_unsignedp)
2766 (convert @0)))))))
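/* For illustration of two of the cases above (invented names):

     short f (short s) { return (short) (int) s; }
     // both conversions are dropped: the value round-trips exactly
     unsigned g (unsigned x) { return (unsigned) (unsigned char) x; }
     // canonicalized on GIMPLE to: x & 0xff  */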
2767
2768 /* If we have a narrowing conversion to an integral type that is fed by a
2769 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2770 masks off bits outside the final type (and nothing else). */
2771 (simplify
2772 (convert (bit_and @0 INTEGER_CST@1))
2773 (if (INTEGRAL_TYPE_P (type)
2774 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2775 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2776 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2777 TYPE_PRECISION (type)), 0))
2778 (convert @0)))
2779
2780
2781 /* (X /[ex] A) * A -> X. */
2782 (simplify
2783 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2784 (convert @0))
2785
2786 /* Simplify (A / B) * B + (A % B) -> A. */
2787 (for div (trunc_div ceil_div floor_div round_div)
2788 mod (trunc_mod ceil_mod floor_mod round_mod)
2789 (simplify
2790 (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
2791 @0))
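/* For illustration (invented names):

     unsigned f (unsigned a, unsigned b) { return (a / b) * b + a % b; }
     // folds to: a, by the definition of truncating division  */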
2792
2793 /* ((X /[ex] A) +- B) * A --> X +- A * B. */
2794 (for op (plus minus)
2795 (simplify
2796 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
2797 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
2798 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
2799 (with
2800 {
2801 wi::overflow_type overflow;
2802 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
2803 TYPE_SIGN (type), &overflow);
2804 }
2805 (if (types_match (type, TREE_TYPE (@2))
2806 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
2807 (op @0 { wide_int_to_tree (type, mul); })
2808 (with { tree utype = unsigned_type_for (type); }
2809 (convert (op (convert:utype @0)
2810 (mult (convert:utype @1) (convert:utype @2))))))))))
2811
2812 /* Canonicalization of binary operations. */
2813
2814 /* Convert X + -C into X - C. */
2815 (simplify
2816 (plus @0 REAL_CST@1)
2817 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2818 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
2819 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2820 (minus @0 { tem; })))))
2821
2822 /* Convert x+x into x*2. */
2823 (simplify
2824 (plus @0 @0)
2825 (if (SCALAR_FLOAT_TYPE_P (type))
2826 (mult @0 { build_real (type, dconst2); })
2827 (if (INTEGRAL_TYPE_P (type))
2828 (mult @0 { build_int_cst (type, 2); }))))
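/* For illustration (invented names):

     double f (double x) { return x + x; }   // folds to: x * 2.0
     int    g (int x)    { return x + x; }   // folds to: x * 2  */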
2829
2830 /* 0 - X -> -X. */
2831 (simplify
2832 (minus integer_zerop @1)
2833 (negate @1))
2834 (simplify
2835 (pointer_diff integer_zerop @1)
2836 (negate (convert @1)))
2837
2838 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2839 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2840 (-ARG1 + ARG0) reduces to -ARG1. */
2841 (simplify
2842 (minus real_zerop@0 @1)
2843 (if (fold_real_zero_addition_p (type, @0, 0))
2844 (negate @1)))
2845
2846 /* Transform x * -1 into -x. */
2847 (simplify
2848 (mult @0 integer_minus_onep)
2849 (negate @0))
2850
2851 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2852 signed overflow for CST != 0 && CST != -1. */
2853 (simplify
2854 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
2855 (if (TREE_CODE (@2) != INTEGER_CST
2856 && single_use (@3)
2857 && !integer_zerop (@1) && !integer_minus_onep (@1))
2858 (mult (mult @0 @2) @1)))
2859
2860 /* True if we can easily extract the real and imaginary parts of a complex
2861 number. */
2862 (match compositional_complex
2863 (convert? (complex @0 @1)))
2864
2865 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2866 (simplify
2867 (complex (realpart @0) (imagpart @0))
2868 @0)
2869 (simplify
2870 (realpart (complex @0 @1))
2871 @0)
2872 (simplify
2873 (imagpart (complex @0 @1))
2874 @1)
2875
2876 /* Sometimes we only care about half of a complex expression. */
2877 (simplify
2878 (realpart (convert?:s (conj:s @0)))
2879 (convert (realpart @0)))
2880 (simplify
2881 (imagpart (convert?:s (conj:s @0)))
2882 (convert (negate (imagpart @0))))
2883 (for part (realpart imagpart)
2884 (for op (plus minus)
2885 (simplify
2886 (part (convert?:s@2 (op:s @0 @1)))
2887 (convert (op (part @0) (part @1))))))
2888 (simplify
2889 (realpart (convert?:s (CEXPI:s @0)))
2890 (convert (COS @0)))
2891 (simplify
2892 (imagpart (convert?:s (CEXPI:s @0)))
2893 (convert (SIN @0)))
2894
2895 /* conj(conj(x)) -> x */
2896 (simplify
2897 (conj (convert? (conj @0)))
2898 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2899 (convert @0)))
2900
2901 /* conj({x,y}) -> {x,-y} */
2902 (simplify
2903 (conj (convert?:s (complex:s @0 @1)))
2904 (with { tree itype = TREE_TYPE (type); }
2905 (complex (convert:itype @0) (negate (convert:itype @1)))))
2906
2907 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2908 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2909 (simplify
2910 (bswap (bswap @0))
2911 @0)
2912 (simplify
2913 (bswap (bit_not (bswap @0)))
2914 (bit_not @0))
2915 (for bitop (bit_xor bit_ior bit_and)
2916 (simplify
2917 (bswap (bitop:c (bswap @0) @1))
2918 (bitop @0 (bswap @1)))))
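/* For illustration (invented names, assuming a 32-bit unsigned int):

     unsigned f (unsigned x)
     { return __builtin_bswap32 (__builtin_bswap32 (x)); }
     // folds to: x
     unsigned g (unsigned x, unsigned y)
     { return __builtin_bswap32 (__builtin_bswap32 (x) ^ y); }
     // folds to: x ^ __builtin_bswap32 (y)  */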
2919
2920
2921 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
2922
2923 /* Simplify constant conditions.
2924 Only optimize constant conditions when the selected branch
2925 has the same type as the COND_EXPR. This avoids optimizing
2926 away "c ? x : throw", where the throw has a void type.
2927 Note that we cannot throw away the fold-const.c variant nor
2928 this one as we depend on doing this transform before possibly
2929 A ? B : B -> B triggers and the fold-const.c one can optimize
2930 0 ? A : B to B even if A has side-effects, something
2931 genmatch cannot handle. */
2932 (simplify
2933 (cond INTEGER_CST@0 @1 @2)
2934 (if (integer_zerop (@0))
2935 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2936 @2)
2937 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2938 @1)))
2939 (simplify
2940 (vec_cond VECTOR_CST@0 @1 @2)
2941 (if (integer_all_onesp (@0))
2942 @1
2943 (if (integer_zerop (@0))
2944 @2)))
2945
2946 /* Sink unary operations to constant branches, but only if we can fold
2947 both arms to constants. */
2948 (for op (negate bit_not abs absu)
2949 (simplify
2950 (op (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2))
2951 (with
2952 {
2953 tree cst1, cst2;
2954 cst1 = const_unop (op, type, @1);
2955 if (cst1)
2956 cst2 = const_unop (op, type, @2);
2957 }
2958 (if (cst1 && cst2)
2959 (vec_cond @0 { cst1; } { cst2; })))))
2960
2961 /* Simplification moved from fold_cond_expr_with_comparison. It may also
2962 be extended. */
2963 /* This pattern implements two kinds of simplification:
2964
2965 Case 1)
2966 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
2967 1) The conversions are type-widening from a smaller type.
2968 2) Const c1 equals c2 after canonicalizing the comparison.
2969 3) The comparison has tree code LT, LE, GT or GE.
2970 This specific pattern is needed when (cmp (convert x) c) may not
2971 be simplified by the comparison patterns because of multiple uses of
2972 x. It also makes sense here because simplifying across multiply
2973 referenced variables is always beneficial in complicated cases.
2974
2975 Case 2)
2976 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2977 (for cmp (lt le gt ge eq)
2978 (simplify
2979 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2980 (with
2981 {
2982 tree from_type = TREE_TYPE (@1);
2983 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2984 enum tree_code code = ERROR_MARK;
2985
2986 if (INTEGRAL_TYPE_P (from_type)
2987 && int_fits_type_p (@2, from_type)
2988 && (types_match (c1_type, from_type)
2989 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2990 && (TYPE_UNSIGNED (from_type)
2991 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2992 && (types_match (c2_type, from_type)
2993 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2994 && (TYPE_UNSIGNED (from_type)
2995 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2996 {
2997 if (cmp != EQ_EXPR)
2998 {
2999 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
3000 {
3001 /* X <= Y - 1 equals to X < Y. */
3002 if (cmp == LE_EXPR)
3003 code = LT_EXPR;
3004 /* X > Y - 1 equals to X >= Y. */
3005 if (cmp == GT_EXPR)
3006 code = GE_EXPR;
3007 }
3008 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
3009 {
3010 /* X < Y + 1 equals to X <= Y. */
3011 if (cmp == LT_EXPR)
3012 code = LE_EXPR;
3013 /* X >= Y + 1 equals to X > Y. */
3014 if (cmp == GE_EXPR)
3015 code = GT_EXPR;
3016 }
3017 if (code != ERROR_MARK
3018 || wi::to_widest (@2) == wi::to_widest (@3))
3019 {
3020 if (cmp == LT_EXPR || cmp == LE_EXPR)
3021 code = MIN_EXPR;
3022 if (cmp == GT_EXPR || cmp == GE_EXPR)
3023 code = MAX_EXPR;
3024 }
3025 }
3026 /* Can we do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
3027 else if (int_fits_type_p (@3, from_type))
3028 code = EQ_EXPR;
3029 }
3030 }
3031 (if (code == MAX_EXPR)
3032 (convert (max @1 (convert @2)))
3033 (if (code == MIN_EXPR)
3034 (convert (min @1 (convert @2)))
3035 (if (code == EQ_EXPR)
3036 (convert (cond (eq @1 (convert @3))
3037 (convert:from_type @3) (convert:from_type @2)))))))))
3038
3039 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
3040
3041 1) OP is PLUS or MINUS.
3042 2) CMP is LT, LE, GT or GE.
3043 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
3044
3045 This pattern also handles special cases like:
3046
3047 A) Operand x is an unsigned-to-signed type conversion and c1 is
3048 integer zero. In this case,
3049 (signed type)x < 0 <=> x > MAX_VAL(signed type)
3050 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
3051 B) Const c1 may not be equal to (C3 op' C2). In this case we also
3052 check equality for (c1+1) and (c1-1) by adjusting comparison
3053 code.
3054
3055 TODO: Though the signed type is handled by this pattern, it cannot be
3056 simplified at the moment because the C standard requires additional
3057 type promotion. In order to match&simplify it here, the IR needs
3058 to be cleaned up by other optimizers, i.e., VRP.
3059 (for op (plus minus)
3060 (for cmp (lt le gt ge)
3061 (simplify
3062 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
3063 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
3064 (if (types_match (from_type, to_type)
3065 /* Check if it is special case A). */
3066 || (TYPE_UNSIGNED (from_type)
3067 && !TYPE_UNSIGNED (to_type)
3068 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
3069 && integer_zerop (@1)
3070 && (cmp == LT_EXPR || cmp == GE_EXPR)))
3071 (with
3072 {
3073 wi::overflow_type overflow = wi::OVF_NONE;
3074 enum tree_code code, cmp_code = cmp;
3075 wide_int real_c1;
3076 wide_int c1 = wi::to_wide (@1);
3077 wide_int c2 = wi::to_wide (@2);
3078 wide_int c3 = wi::to_wide (@3);
3079 signop sgn = TYPE_SIGN (from_type);
3080
3081 /* Handle special case A), given x of unsigned type:
3082 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
3083 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
3084 if (!types_match (from_type, to_type))
3085 {
3086 if (cmp_code == LT_EXPR)
3087 cmp_code = GT_EXPR;
3088 if (cmp_code == GE_EXPR)
3089 cmp_code = LE_EXPR;
3090 c1 = wi::max_value (to_type);
3091 }
3092 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
3093 compute (c3 op' c2) and check if it equals to c1 with op' being
3094 the inverted operator of op. Make sure overflow doesn't happen
3095 if it is undefined. */
3096 if (op == PLUS_EXPR)
3097 real_c1 = wi::sub (c3, c2, sgn, &overflow);
3098 else
3099 real_c1 = wi::add (c3, c2, sgn, &overflow);
3100
3101 code = cmp_code;
3102 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3103 {
3104 /* Check if c1 equals to real_c1. Boundary condition is handled
3105 by adjusting comparison operation if necessary. */
3106 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3107 && !overflow)
3108 {
3109 /* X <= Y - 1 equals to X < Y. */
3110 if (cmp_code == LE_EXPR)
3111 code = LT_EXPR;
3112 /* X > Y - 1 equals to X >= Y. */
3113 if (cmp_code == GT_EXPR)
3114 code = GE_EXPR;
3115 }
3116 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3117 && !overflow)
3118 {
3119 /* X < Y + 1 equals to X <= Y. */
3120 if (cmp_code == LT_EXPR)
3121 code = LE_EXPR;
3122 /* X >= Y + 1 equals to X > Y. */
3123 if (cmp_code == GE_EXPR)
3124 code = GT_EXPR;
3125 }
3126 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3127 {
3128 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3129 code = MIN_EXPR;
3130 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3131 code = MAX_EXPR;
3132 }
3133 }
3134 }
3135 (if (code == MAX_EXPR)
3136 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3137 { wide_int_to_tree (from_type, c2); })
3138 (if (code == MIN_EXPR)
3139 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3140 { wide_int_to_tree (from_type, c2); })))))))))
3141
3142 (for cnd (cond vec_cond)
3143 /* A ? B : (A ? X : C) -> A ? B : C. */
3144 (simplify
3145 (cnd @0 (cnd @0 @1 @2) @3)
3146 (cnd @0 @1 @3))
3147 (simplify
3148 (cnd @0 @1 (cnd @0 @2 @3))
3149 (cnd @0 @1 @3))
3150 /* A ? B : (!A ? C : X) -> A ? B : C. */
3151 /* ??? This matches embedded conditions open-coded because genmatch
3152 would generate matching code for conditions in separate stmts only.
3153 The following is still important to merge then and else arm cases
3154 from if-conversion. */
3155 (simplify
3156 (cnd @0 @1 (cnd @2 @3 @4))
3157 (if (inverse_conditions_p (@0, @2))
3158 (cnd @0 @1 @3)))
3159 (simplify
3160 (cnd @0 (cnd @1 @2 @3) @4)
3161 (if (inverse_conditions_p (@0, @1))
3162 (cnd @0 @3 @4)))
3163
3164 /* A ? B : B -> B. */
3165 (simplify
3166 (cnd @0 @1 @1)
3167 @1)
3168
3169 /* !A ? B : C -> A ? C : B. */
3170 (simplify
3171 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3172 (cnd @0 @2 @1)))
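/* For illustration (invented names):

     int f (int a, int b, int c) { return a ? b : (a ? 99 : c); }
     // folds to: a ? b : c
     int g (int a, int b) { return a ? b : b; }
     // folds to: b  */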
3173
3174 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3175 return all -1 or all 0 results. */
3176 /* ??? We could instead convert all instances of the vec_cond to negate,
3177 but that isn't necessarily a win on its own. */
3178 (simplify
3179 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3180 (if (VECTOR_TYPE_P (type)
3181 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3182 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3183 && (TYPE_MODE (TREE_TYPE (type))
3184 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3185 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3186
3187 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
3188 (simplify
3189 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3190 (if (VECTOR_TYPE_P (type)
3191 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3192 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3193 && (TYPE_MODE (TREE_TYPE (type))
3194 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3195 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3196
3197
3198 /* Simplifications of comparisons. */
3199
3200 /* See if we can reduce the magnitude of a constant involved in a
3201 comparison by changing the comparison code. This is a canonicalization
3202 formerly done by maybe_canonicalize_comparison_1. */
3203 (for cmp (le gt)
3204 acmp (lt ge)
3205 (simplify
3206 (cmp @0 uniform_integer_cst_p@1)
3207 (with { tree cst = uniform_integer_cst_p (@1); }
3208 (if (tree_int_cst_sgn (cst) == -1)
3209 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3210 wide_int_to_tree (TREE_TYPE (cst),
3211 wi::to_wide (cst)
3212 + 1)); })))))
3213 (for cmp (ge lt)
3214 acmp (gt le)
3215 (simplify
3216 (cmp @0 uniform_integer_cst_p@1)
3217 (with { tree cst = uniform_integer_cst_p (@1); }
3218 (if (tree_int_cst_sgn (cst) == 1)
3219 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3220 wide_int_to_tree (TREE_TYPE (cst),
3221 wi::to_wide (cst) - 1)); })))))
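
/* Examples of the two canonicalizations above, for integer x:
   x <= -5 becomes x < -4 (a negative constant moves toward zero),
   and x >= 7 becomes x > 6 (a positive constant is decremented). */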
3222
3223 /* We can simplify a logical negation of a comparison to the
3224 inverted comparison. As we cannot use the result of
3225 invert_tree_comparison as an expression operator directly,
3226 we have to simulate that with expression code iteration. */
3227 (for cmp (tcc_comparison)
3228 icmp (inverted_tcc_comparison)
3229 ncmp (inverted_tcc_comparison_with_nans)
3230 /* Ideally we'd like to combine the following two patterns
3231 and handle some more cases by using
3232 (logical_inverted_value (cmp @0 @1))
3233 here, but for that genmatch would need to "inline" that.
3234 For now implement what forward_propagate_comparison did. */
3235 (simplify
3236 (bit_not (cmp @0 @1))
3237 (if (VECTOR_TYPE_P (type)
3238 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3239 /* Comparison inversion may be impossible for trapping math;
3240 invert_tree_comparison will tell us. But we can't use
3241 a computed operator in the replacement tree, thus we have
3242 to play the trick below. */
3243 (with { enum tree_code ic = invert_tree_comparison
3244 (cmp, HONOR_NANS (@0)); }
3245 (if (ic == icmp)
3246 (icmp @0 @1)
3247 (if (ic == ncmp)
3248 (ncmp @0 @1))))))
3249 (simplify
3250 (bit_xor (cmp @0 @1) integer_truep)
3251 (with { enum tree_code ic = invert_tree_comparison
3252 (cmp, HONOR_NANS (@0)); }
3253 (if (ic == icmp)
3254 (icmp @0 @1)
3255 (if (ic == ncmp)
3256 (ncmp @0 @1))))))
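
/* E.g. for a boolean (or vector mask) result, ~(a < b) becomes
   a >= b when NaNs need not be honored; when they must be, the
   NaN-aware inverse a unge b is used instead, exactly as
   invert_tree_comparison reports. */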
3257
3258 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3259 ??? The transformation is valid for the other operators if overflow
3260 is undefined for the type, but performing it here badly interacts
3261 with the transformation in fold_cond_expr_with_comparison which
3262 attempts to synthesize ABS_EXPR. */
3263 (for cmp (eq ne)
3264 (for sub (minus pointer_diff)
3265 (simplify
3266 (cmp (sub@2 @0 @1) integer_zerop)
3267 (if (single_use (@2))
3268 (cmp @0 @1)))))
3269
3270 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3271 signed arithmetic case. That form is created by the compiler
3272 often enough for folding it to be of value. One example is in
3273 computing loop trip counts after Operator Strength Reduction. */
3274 (for cmp (simple_comparison)
3275 scmp (swapped_simple_comparison)
3276 (simplify
3277 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
3278 /* Handle unfolded multiplication by zero. */
3279 (if (integer_zerop (@1))
3280 (cmp @1 @2)
3281 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3282 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3283 && single_use (@3))
3284 /* If @1 is negative we swap the sense of the comparison. */
3285 (if (tree_int_cst_sgn (@1) < 0)
3286 (scmp @0 @2)
3287 (cmp @0 @2))))))
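
/* E.g. for signed x with undefined overflow, x * 4 > 0 simplifies
   to x > 0, while x * -4 > 0 becomes x < 0 because the negative
   multiplier swaps the sense of the comparison. */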
3288
3289 /* Simplify comparison of something with itself. For IEEE
3290 floating-point, we can only do some of these simplifications. */
3291 (for cmp (eq ge le)
3292 (simplify
3293 (cmp @0 @0)
3294 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
3295 || ! HONOR_NANS (@0))
3296 { constant_boolean_node (true, type); }
3297 (if (cmp != EQ_EXPR)
3298 (eq @0 @0)))))
3299 (for cmp (ne gt lt)
3300 (simplify
3301 (cmp @0 @0)
3302 (if (cmp != NE_EXPR
3303 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
3304 || ! HONOR_NANS (@0))
3305 { constant_boolean_node (false, type); })))
3306 (for cmp (unle unge uneq)
3307 (simplify
3308 (cmp @0 @0)
3309 { constant_boolean_node (true, type); }))
3310 (for cmp (unlt ungt)
3311 (simplify
3312 (cmp @0 @0)
3313 (unordered @0 @0)))
3314 (simplify
3315 (ltgt @0 @0)
3316 (if (!flag_trapping_math)
3317 { constant_boolean_node (false, type); }))
3318
3319 /* Fold ~X op ~Y as Y op X. */
3320 (for cmp (simple_comparison)
3321 (simplify
3322 (cmp (bit_not@2 @0) (bit_not@3 @1))
3323 (if (single_use (@2) && single_use (@3))
3324 (cmp @1 @0))))
3325
3326 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
3327 (for cmp (simple_comparison)
3328 scmp (swapped_simple_comparison)
3329 (simplify
3330 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3331 (if (single_use (@2)
3332 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
3333 (scmp @0 (bit_not @1)))))
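
/* E.g. ~x < 5 becomes x > ~5, i.e. x > -6 for a signed
   two's complement x, after folding the bit_not of the constant. */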
3334
3335 (for cmp (simple_comparison)
3336 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3337 (simplify
3338 (cmp (convert@2 @0) (convert? @1))
3339 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3340 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3341 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3342 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3343 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3344 (with
3345 {
3346 tree type1 = TREE_TYPE (@1);
3347 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3348 {
3349 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3350 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3351 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3352 type1 = float_type_node;
3353 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3354 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3355 type1 = double_type_node;
3356 }
3357 tree newtype
3358 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
3359 ? TREE_TYPE (@0) : type1);
3360 }
3361 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3362 (cmp (convert:newtype @0) (convert:newtype @1))))))
3363
3364 (simplify
3365 (cmp @0 REAL_CST@1)
3366 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
3367 (switch
3368 /* a CMP (-0) -> a CMP 0 */
3369 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3370 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3371 /* x != NaN is always true, other ops are always false. */
3372 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3373 && ! HONOR_SNANS (@1))
3374 { constant_boolean_node (cmp == NE_EXPR, type); })
3375 /* Fold comparisons against infinity. */
3376 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3377 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3378 (with
3379 {
3380 REAL_VALUE_TYPE max;
3381 enum tree_code code = cmp;
3382 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3383 if (neg)
3384 code = swap_tree_comparison (code);
3385 }
3386 (switch
3387 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
3388 (if (code == GT_EXPR
3389 && !(HONOR_NANS (@0) && flag_trapping_math))
3390 { constant_boolean_node (false, type); })
3391 (if (code == LE_EXPR)
3392 /* x <= +Inf is always true, if we don't care about NaNs. */
3393 (if (! HONOR_NANS (@0))
3394 { constant_boolean_node (true, type); }
3395 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3396 an "invalid" exception. */
3397 (if (!flag_trapping_math)
3398 (eq @0 @0))))
3399 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3400 for == this introduces an exception for x a NaN. */
3401 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3402 || code == GE_EXPR)
3403 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3404 (if (neg)
3405 (lt @0 { build_real (TREE_TYPE (@0), max); })
3406 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3407 /* x < +Inf is always equal to x <= DBL_MAX. */
3408 (if (code == LT_EXPR)
3409 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3410 (if (neg)
3411 (ge @0 { build_real (TREE_TYPE (@0), max); })
3412 (le @0 { build_real (TREE_TYPE (@0), max); }))))
3413 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3414 an exception for x a NaN so use an unordered comparison. */
3415 (if (code == NE_EXPR)
3416 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3417 (if (! HONOR_NANS (@0))
3418 (if (neg)
3419 (ge @0 { build_real (TREE_TYPE (@0), max); })
3420 (le @0 { build_real (TREE_TYPE (@0), max); }))
3421 (if (neg)
3422 (unge @0 { build_real (TREE_TYPE (@0), max); })
3423 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
3424
3425 /* If this is a comparison of a real constant with a PLUS_EXPR
3426 or a MINUS_EXPR of a real constant, we can convert it into a
3427 comparison with a revised real constant as long as no overflow
3428 occurs when unsafe_math_optimizations are enabled. */
3429 (if (flag_unsafe_math_optimizations)
3430 (for op (plus minus)
3431 (simplify
3432 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3433 (with
3434 {
3435 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3436 TREE_TYPE (@1), @2, @1);
3437 }
3438 (if (tem && !TREE_OVERFLOW (tem))
3439 (cmp @0 { tem; }))))))
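
/* E.g. under -funsafe-math-optimizations, x + 2.0 < 5.0 becomes
   x < 3.0, with 5.0 - 2.0 folded at compile time. */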
3440
3441 /* Likewise, we can simplify a comparison of a real constant with
3442 a MINUS_EXPR whose first operand is also a real constant, i.e.
3443 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3444 floating-point types only if -fassociative-math is set. */
3445 (if (flag_associative_math)
3446 (simplify
3447 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
3448 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
3449 (if (tem && !TREE_OVERFLOW (tem))
3450 (cmp { tem; } @1)))))
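
/* E.g. under -fassociative-math, (10.0 - x) < 4.0 becomes
   6.0 < x, since tem = 10.0 - 4.0 ends up as the left operand. */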
3451
3452 /* Fold comparisons against built-in math functions. */
3453 (if (flag_unsafe_math_optimizations
3454 && ! flag_errno_math)
3455 (for sq (SQRT)
3456 (simplify
3457 (cmp (sq @0) REAL_CST@1)
3458 (switch
3459 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3460 (switch
3461 /* sqrt(x) ==/</<= y is always false, if y is negative. */
3462 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
3463 { constant_boolean_node (false, type); })
3464 /* sqrt(x) > y is always true, if y is negative and we
3465 don't care about NaNs, i.e. negative values of x. */
3466 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3467 { constant_boolean_node (true, type); })
3468 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3469 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3470 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3471 (switch
3472 /* sqrt(x) < 0 is always false. */
3473 (if (cmp == LT_EXPR)
3474 { constant_boolean_node (false, type); })
3475 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3476 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3477 { constant_boolean_node (true, type); })
3478 /* sqrt(x) <= 0 -> x == 0. */
3479 (if (cmp == LE_EXPR)
3480 (eq @0 @1))
3481 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3482 == or !=. In the last case:
3483
3484 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3485
3486 if x is negative or NaN. Due to -funsafe-math-optimizations,
3487 the results for other x follow from natural arithmetic. */
3488 (cmp @0 @1)))
3489 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3490 (with
3491 {
3492 REAL_VALUE_TYPE c2;
3493 real_arithmetic (&c2, MULT_EXPR,
3494 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3495 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3496 }
3497 (if (REAL_VALUE_ISINF (c2))
3498 /* sqrt(x) > y is x == +Inf, when y is very large. */
3499 (if (HONOR_INFINITIES (@0))
3500 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3501 { constant_boolean_node (false, type); })
3502 /* sqrt(x) > c is the same as x > c*c. */
3503 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3504 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3505 (with
3506 {
3507 REAL_VALUE_TYPE c2;
3508 real_arithmetic (&c2, MULT_EXPR,
3509 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3510 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3511 }
3512 (if (REAL_VALUE_ISINF (c2))
3513 (switch
3514 /* sqrt(x) < y is always true, when y is a very large
3515 value and we don't care about NaNs or Infinities. */
3516 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3517 { constant_boolean_node (true, type); })
3518 /* sqrt(x) < y is x != +Inf when y is very large and we
3519 don't care about NaNs. */
3520 (if (! HONOR_NANS (@0))
3521 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3522 /* sqrt(x) < y is x >= 0 when y is very large and we
3523 don't care about Infinities. */
3524 (if (! HONOR_INFINITIES (@0))
3525 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3526 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3527 (if (GENERIC)
3528 (truth_andif
3529 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3530 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3531 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3532 (if (! HONOR_NANS (@0))
3533 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3534 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3535 (if (GENERIC)
3536 (truth_andif
3537 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3538 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3539 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3540 (simplify
3541 (cmp (sq @0) (sq @1))
3542 (if (! HONOR_NANS (@0))
3543 (cmp @0 @1))))))
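
/* Concrete instances of the above, under -funsafe-math-optimizations
   and -fno-math-errno: sqrt(x) > 3.0 becomes x > 9.0 because
   c2 = 3.0 * 3.0 is finite, and sqrt(x) == sqrt(y) becomes x == y
   when NaNs are not honored. */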
3544
3545 /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
3546 (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
3547 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
3548 (simplify
3549 (cmp (float@0 @1) (float @2))
3550 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
3551 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3552 (with
3553 {
3554 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
3555 tree type1 = TREE_TYPE (@1);
3556 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
3557 tree type2 = TREE_TYPE (@2);
3558 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
3559 }
3560 (if (fmt.can_represent_integral_type_p (type1)
3561 && fmt.can_represent_integral_type_p (type2))
3562 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
3563 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
3564 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
3565 && type1_signed_p >= type2_signed_p)
3566 (icmp @1 (convert @2))
3567 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
3568 && type1_signed_p <= type2_signed_p)
3569 (icmp (convert:type2 @1) @2)
3570 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
3571 && type1_signed_p == type2_signed_p)
3572 (icmp @1 @2))))))))))
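
/* E.g. (an illustrative sketch, assuming 32-bit int and IEEE
   double): for int i, j, (double) i == (double) j folds to i == j,
   since the 53-bit significand of double represents every 32-bit
   int exactly. */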
3573
3574 /* Optimize various special cases of (FTYPE) N CMP CST. */
3575 (for cmp (lt le eq ne ge gt)
3576 icmp (le le eq ne ge ge)
3577 (simplify
3578 (cmp (float @0) REAL_CST@1)
3579 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3580 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3581 (with
3582 {
3583 tree itype = TREE_TYPE (@0);
3584 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3585 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3586 /* Be careful to preserve any potential exceptions due to
3587 NaNs. qNaNs are ok in == or != context.
3588 TODO: relax under -fno-trapping-math or
3589 -fno-signaling-nans. */
3590 bool exception_p
3591 = real_isnan (cst) && (cst->signalling
3592 || (cmp != EQ_EXPR && cmp != NE_EXPR));
3593 }
3594 /* TODO: allow non-fitting itype and SNaNs when
3595 -fno-trapping-math. */
3596 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
3597 (with
3598 {
3599 signop isign = TYPE_SIGN (itype);
3600 REAL_VALUE_TYPE imin, imax;
3601 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3602 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3603
3604 REAL_VALUE_TYPE icst;
3605 if (cmp == GT_EXPR || cmp == GE_EXPR)
3606 real_ceil (&icst, fmt, cst);
3607 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3608 real_floor (&icst, fmt, cst);
3609 else
3610 real_trunc (&icst, fmt, cst);
3611
3612 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
3613
3614 bool overflow_p = false;
3615 wide_int icst_val
3616 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3617 }
3618 (switch
3619 /* Optimize cases when CST is outside of ITYPE's range. */
3620 (if (real_compare (LT_EXPR, cst, &imin))
3621 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3622 type); })
3623 (if (real_compare (GT_EXPR, cst, &imax))
3624 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3625 type); })
3626 /* Remove cast if CST is an integer representable by ITYPE. */
3627 (if (cst_int_p)
3628 (cmp @0 { gcc_assert (!overflow_p);
3629 wide_int_to_tree (itype, icst_val); })
3630 )
3631 /* When CST is fractional, optimize
3632 (FTYPE) N == CST -> 0
3633 (FTYPE) N != CST -> 1. */
3634 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3635 { constant_boolean_node (cmp == NE_EXPR, type); })
3636 /* Otherwise replace with a sensible integer constant. */
3637 (with
3638 {
3639 gcc_checking_assert (!overflow_p);
3640 }
3641 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
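
/* For example (assuming 32-bit int and IEEE double): for int i,
   (double) i < 2.5 becomes i <= 2 via real_floor and the lt -> le
   entry of icmp, while (double) i == 2.5 folds to false because
   2.5 is fractional. */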
3642
3643 /* Fold A /[ex] B CMP C to A CMP B * C. */
3644 (for cmp (eq ne)
3645 (simplify
3646 (cmp (exact_div @0 @1) INTEGER_CST@2)
3647 (if (!integer_zerop (@1))
3648 (if (wi::to_wide (@2) == 0)
3649 (cmp @0 @2)
3650 (if (TREE_CODE (@1) == INTEGER_CST)
3651 (with
3652 {
3653 wi::overflow_type ovf;
3654 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3655 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3656 }
3657 (if (ovf)
3658 { constant_boolean_node (cmp == NE_EXPR, type); }
3659 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3660 (for cmp (lt le gt ge)
3661 (simplify
3662 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
3663 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3664 (with
3665 {
3666 wi::overflow_type ovf;
3667 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3668 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3669 }
3670 (if (ovf)
3671 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3672 TYPE_SIGN (TREE_TYPE (@2)))
3673 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3674 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
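
/* Since /[ex] asserts the division is exact, e.g. (x /[ex] 4) == 3
   becomes x == 12; the lt/le/gt/ge forms fold the same way for a
   positive divisor when the product 4 * 3 does not overflow. */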
3675
3676 /* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.
3677
3678 For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
3679 For large C (more than min/B+2^size), this is also true, with the
3680 multiplication computed modulo 2^size.
3681 For intermediate C, this just tests the sign of A. */
3682 (for cmp (lt le gt ge)
3683 cmp2 (ge ge lt lt)
3684 (simplify
3685 (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
3686 (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
3687 && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
3688 && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3689 (with
3690 {
3691 tree utype = TREE_TYPE (@2);
3692 wide_int denom = wi::to_wide (@1);
3693 wide_int right = wi::to_wide (@2);
3694 wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
3695 wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
3696 bool small = wi::leu_p (right, smax);
3697 bool large = wi::geu_p (right, smin);
3698 }
3699 (if (small || large)
3700 (cmp (convert:utype @0) (mult @2 (convert @1)))
3701 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))
3702
3703 /* Unordered tests if either argument is a NaN. */
3704 (simplify
3705 (bit_ior (unordered @0 @0) (unordered @1 @1))
3706 (if (types_match (@0, @1))
3707 (unordered @0 @1)))
3708 (simplify
3709 (bit_and (ordered @0 @0) (ordered @1 @1))
3710 (if (types_match (@0, @1))
3711 (ordered @0 @1)))
3712 (simplify
3713 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3714 @2)
3715 (simplify
3716 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3717 @2)
3718
3719 /* Simple range test simplifications. */
3720 /* A < B || A >= B -> true. */
3721 (for test1 (lt le le le ne ge)
3722 test2 (ge gt ge ne eq ne)
3723 (simplify
3724 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3725 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3726 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3727 { constant_boolean_node (true, type); })))
3728 /* A < B && A >= B -> false. */
3729 (for test1 (lt lt lt le ne eq)
3730 test2 (ge gt eq gt eq gt)
3731 (simplify
3732 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3733 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3734 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3735 { constant_boolean_node (false, type); })))
3736
3737 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3738 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3739
3740 Note that comparisons
3741 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3742 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3743 will be canonicalized to the above, so there's no need to
3744 consider them here.
3745 */
3746
3747 (for cmp (le gt)
3748 eqcmp (eq ne)
3749 (simplify
3750 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3751 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3752 (with
3753 {
3754 tree ty = TREE_TYPE (@0);
3755 unsigned prec = TYPE_PRECISION (ty);
3756 wide_int mask = wi::to_wide (@2, prec);
3757 wide_int rhs = wi::to_wide (@3, prec);
3758 signop sgn = TYPE_SIGN (ty);
3759 }
3760 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3761 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3762 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3763 { build_zero_cst (ty); }))))))
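
/* E.g. (x & 15) <= 3 becomes (x & 12) == 0: here mask = 2**4 - 1
   and rhs = 2**2 - 1, so mask - rhs isolates exactly the bits that
   must be clear for the original comparison to hold. */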
3764
3765 /* -A CMP -B -> B CMP A. */
3766 (for cmp (tcc_comparison)
3767 scmp (swapped_tcc_comparison)
3768 (simplify
3769 (cmp (negate @0) (negate @1))
3770 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3771 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3772 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3773 (scmp @0 @1)))
3774 (simplify
3775 (cmp (negate @0) CONSTANT_CLASS_P@1)
3776 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3777 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3778 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3779 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
3780 (if (tem && !TREE_OVERFLOW (tem))
3781 (scmp @0 { tem; }))))))
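
/* E.g. for signed x, y with undefined overflow (or floats),
   -x < -y becomes x > y, and -x < 3 becomes x > -3 after negating
   the constant. */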
3782
3783 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3784 (for op (eq ne)
3785 (simplify
3786 (op (abs @0) zerop@1)
3787 (op @0 @1)))
3788
3789 /* From fold_sign_changed_comparison and fold_widened_comparison.
3790 FIXME: the lack of symmetry is disturbing. */
3791 (for cmp (simple_comparison)
3792 (simplify
3793 (cmp (convert@0 @00) (convert?@1 @10))
3794 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3795 /* Disable this optimization if we're casting a function pointer
3796 type on targets that require function pointer canonicalization. */
3797 && !(targetm.have_canonicalize_funcptr_for_compare ()
3798 && ((POINTER_TYPE_P (TREE_TYPE (@00))
3799 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
3800 || (POINTER_TYPE_P (TREE_TYPE (@10))
3801 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
3802 && single_use (@0))
3803 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3804 && (TREE_CODE (@10) == INTEGER_CST
3805 || @1 != @10)
3806 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3807 || cmp == NE_EXPR
3808 || cmp == EQ_EXPR)
3809 && !POINTER_TYPE_P (TREE_TYPE (@00)))
3810 /* ??? The special-casing of INTEGER_CST conversion was in the original
3811 code and remains here to avoid a spurious overflow flag on the
3812 resulting constant which fold_convert produces. */
3813 (if (TREE_CODE (@1) == INTEGER_CST)
3814 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3815 TREE_OVERFLOW (@1)); })
3816 (cmp @00 (convert @1)))
3817
3818 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3819 /* If possible, express the comparison in the shorter mode. */
3820 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
3821 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3822 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3823 && TYPE_UNSIGNED (TREE_TYPE (@00))))
3824 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3825 || ((TYPE_PRECISION (TREE_TYPE (@00))
3826 >= TYPE_PRECISION (TREE_TYPE (@10)))
3827 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3828 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3829 || (TREE_CODE (@10) == INTEGER_CST
3830 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3831 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3832 (cmp @00 (convert @10))
3833 (if (TREE_CODE (@10) == INTEGER_CST
3834 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3835 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3836 (with
3837 {
3838 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3839 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3840 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3841 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3842 }
3843 (if (above || below)
3844 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3845 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3846 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3847 { constant_boolean_node (above ? true : false, type); }
3848 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3849 { constant_boolean_node (above ? false : true, type); }))))))))))))
3850
3851 (for cmp (eq ne)
3852 /* A local variable can never be pointed to by
3853 the default SSA name of an incoming parameter.
3854 SSA names are canonicalized to 2nd place. */
3855 (simplify
3856 (cmp addr@0 SSA_NAME@1)
3857 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3858 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3859 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3860 (if (TREE_CODE (base) == VAR_DECL
3861 && auto_var_in_fn_p (base, current_function_decl))
3862 (if (cmp == NE_EXPR)
3863 { constant_boolean_node (true, type); }
3864 { constant_boolean_node (false, type); }))))))
3865
3866 /* Equality compare simplifications from fold_binary */
3867 (for cmp (eq ne)
3868
3869 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3870 Similarly for NE_EXPR. */
3871 (simplify
3872 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3873 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
3874 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
3875 { constant_boolean_node (cmp == NE_EXPR, type); }))
3876
3877 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3878 (simplify
3879 (cmp (bit_xor @0 @1) integer_zerop)
3880 (cmp @0 @1))
3881
3882 /* (X ^ Y) == Y becomes X == 0.
3883 Likewise (X ^ Y) == X becomes Y == 0. */
3884 (simplify
3885 (cmp:c (bit_xor:c @0 @1) @0)
3886 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3887
3888 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3889 (simplify
3890 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3891 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
3892 (cmp @0 (bit_xor @1 (convert @2)))))
3893
3894 (simplify
3895 (cmp (convert? addr@0) integer_zerop)
3896 (if (tree_single_nonzero_warnv_p (@0, NULL))
3897 { constant_boolean_node (cmp == NE_EXPR, type); })))
3898
3899 /* If we have (A & C) == C where C is a power of 2, convert this into
3900 (A & C) != 0. Similarly for NE_EXPR. */
3901 (for cmp (eq ne)
3902 icmp (ne eq)
3903 (simplify
3904 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3905 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
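
/* E.g. (x & 8) == 8 becomes (x & 8) != 0: as x & 8 is either 0
   or 8, the two tests are equivalent. */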
3906
3907 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3908 convert this into a shift followed by ANDing with D. */
3909 (simplify
3910 (cond
3911 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
3912 INTEGER_CST@2 integer_zerop)
3913 (if (integer_pow2p (@2))
3914 (with {
3915 int shift = (wi::exact_log2 (wi::to_wide (@2))
3916 - wi::exact_log2 (wi::to_wide (@1)));
3917 }
3918 (if (shift > 0)
3919 (bit_and
3920 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3921 (bit_and
3922 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
3923 @2)))))
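
/* E.g. (x & 4) != 0 ? 16 : 0 becomes (x << 2) & 16, modulo a
   conversion to the result type: the tested bit is moved up by
   log2 (16) - log2 (4) = 2 positions and then masked. */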
3924
3925 /* If we have (A & C) != 0 where C is the sign bit of A, convert
3926 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3927 (for cmp (eq ne)
3928 ncmp (ge lt)
3929 (simplify
3930 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3931 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3932 && type_has_mode_precision_p (TREE_TYPE (@0))
3933 && element_precision (@2) >= element_precision (@0)
3934 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
3935 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3936 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3937
3938 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
3939 this into a right shift or sign extension followed by ANDing with C. */
3940 (simplify
3941 (cond
3942 (lt @0 integer_zerop)
3943 INTEGER_CST@1 integer_zerop)
3944 (if (integer_pow2p (@1)
3945 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
3946 (with {
3947 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
3948 }
3949 (if (shift >= 0)
3950 (bit_and
3951 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3952 @1)
3953 /* Otherwise the result type must be wider than TREE_TYPE (@0), and pure
3954 sign extension followed by ANDing with C will achieve the effect. */
3955 (bit_and (convert @0) @1)))))
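
/* E.g. for 32-bit int x (an illustrative sketch), x < 0 ? 16 : 0
   becomes (x >> 27) & 16: the arithmetic shift replicates the sign
   bit down to bit position 4, where the AND isolates it. */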
3956
3957 /* When the addresses are not directly of decls, compare base and offset.
3958 This implements some remaining parts of fold_comparison address
3959 comparisons but is still not a complete replacement. Still it is good
3960 enough to make fold_stmt not regress when not dispatching to fold_binary. */
3961 (for cmp (simple_comparison)
3962 (simplify
3963 (cmp (convert1?@2 addr@0) (convert2? addr@1))
3964 (with
3965 {
3966 poly_int64 off0, off1;
3967 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3968 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3969 if (base0 && TREE_CODE (base0) == MEM_REF)
3970 {
3971 off0 += mem_ref_offset (base0).force_shwi ();
3972 base0 = TREE_OPERAND (base0, 0);
3973 }
3974 if (base1 && TREE_CODE (base1) == MEM_REF)
3975 {
3976 off1 += mem_ref_offset (base1).force_shwi ();
3977 base1 = TREE_OPERAND (base1, 0);
3978 }
3979 }
3980 (if (base0 && base1)
3981 (with
3982 {
3983 int equal = 2;
3984 /* Punt in GENERIC on variables with value expressions;
3985 the value expressions might point to fields/elements
3986 of other vars etc. */
3987 if (GENERIC
3988 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3989 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3990 ;
3991 else if (decl_in_symtab_p (base0)
3992 && decl_in_symtab_p (base1))
3993 equal = symtab_node::get_create (base0)
3994 ->equal_address_to (symtab_node::get_create (base1));
3995 else if ((DECL_P (base0)
3996 || TREE_CODE (base0) == SSA_NAME
3997 || TREE_CODE (base0) == STRING_CST)
3998 && (DECL_P (base1)
3999 || TREE_CODE (base1) == SSA_NAME
4000 || TREE_CODE (base1) == STRING_CST))
4001 equal = (base0 == base1);
4002 if (equal == 0)
4003 {
4004 HOST_WIDE_INT ioff0 = -1, ioff1 = -1;
4005 off0.is_constant (&ioff0);
4006 off1.is_constant (&ioff1);
4007 if ((DECL_P (base0) && TREE_CODE (base1) == STRING_CST)
4008 || (TREE_CODE (base0) == STRING_CST && DECL_P (base1))
4009 || (TREE_CODE (base0) == STRING_CST
4010 && TREE_CODE (base1) == STRING_CST
4011 && ioff0 >= 0 && ioff1 >= 0
4012 && ioff0 < TREE_STRING_LENGTH (base0)
4013 && ioff1 < TREE_STRING_LENGTH (base1)
4014 /* This is an overly conservative test that the STRING_CSTs
4015 will not end up being string-merged. */
4016 && strncmp (TREE_STRING_POINTER (base0) + ioff0,
4017 TREE_STRING_POINTER (base1) + ioff1,
4018 MIN (TREE_STRING_LENGTH (base0) - ioff0,
4019 TREE_STRING_LENGTH (base1) - ioff1)) != 0))
4020 ;
4021 else if (!DECL_P (base0) || !DECL_P (base1))
4022 equal = 2;
4023 else if (cmp != EQ_EXPR && cmp != NE_EXPR)
4024 equal = 2;
4025 /* If this is a pointer comparison, ignore for now even valid
4026 equalities where one pointer is at offset zero of one object
4027 and the other points one past the end of another one. */
4028 else if (!INTEGRAL_TYPE_P (TREE_TYPE (@2)))
4029 ;
4030 /* Assume that automatic variables can't be adjacent to global
4031 variables. */
4032 else if (is_global_var (base0) != is_global_var (base1))
4033 ;
4034 else
4035 {
4036 tree sz0 = DECL_SIZE_UNIT (base0);
4037 tree sz1 = DECL_SIZE_UNIT (base1);
4038 /* If sizes are unknown, e.g. VLA or not representable,
4039 punt. */
4040 if (!tree_fits_poly_int64_p (sz0)
4041 || !tree_fits_poly_int64_p (sz1))
4042 equal = 2;
4043 else
4044 {
4045 poly_int64 size0 = tree_to_poly_int64 (sz0);
4046 poly_int64 size1 = tree_to_poly_int64 (sz1);
4047 /* If one offset is pointing (or could be) to the beginning
4048 of one object and the other is pointing to one past the
4049 last byte of the other object, punt. */
4050 if (maybe_eq (off0, 0) && maybe_eq (off1, size1))
4051 equal = 2;
4052 else if (maybe_eq (off1, 0) && maybe_eq (off0, size0))
4053 equal = 2;
4054 /* If both offsets are the same, there are some cases
4055 we know are OK: either we know the offsets aren't
4056 zero, or we know both sizes are nonzero. */
4057 if (equal == 2
4058 && known_eq (off0, off1)
4059 && (known_ne (off0, 0)
4060 || (known_ne (size0, 0) && known_ne (size1, 0))))
4061 equal = 0;
4062 }
4063 }
4064 }
4065 }
4066 (if (equal == 1
4067 && (cmp == EQ_EXPR || cmp == NE_EXPR
4068 /* If the offsets are equal we can ignore overflow. */
4069 || known_eq (off0, off1)
4070 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
4071 /* Or if we compare using pointers to decls or strings. */
4072 || (POINTER_TYPE_P (TREE_TYPE (@2))
4073 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
4074 (switch
4075 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4076 { constant_boolean_node (known_eq (off0, off1), type); })
4077 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4078 { constant_boolean_node (known_ne (off0, off1), type); })
4079 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
4080 { constant_boolean_node (known_lt (off0, off1), type); })
4081 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
4082 { constant_boolean_node (known_le (off0, off1), type); })
4083 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
4084 { constant_boolean_node (known_ge (off0, off1), type); })
4085 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
4086 { constant_boolean_node (known_gt (off0, off1), type); }))
4087 (if (equal == 0)
4088 (switch
4089 (if (cmp == EQ_EXPR)
4090 { constant_boolean_node (false, type); })
4091 (if (cmp == NE_EXPR)
4092 { constant_boolean_node (true, type); })))))))))
4093
4094 /* Simplify pointer equality compares using PTA. */
4095 (for neeq (ne eq)
4096 (simplify
4097 (neeq @0 @1)
4098 (if (POINTER_TYPE_P (TREE_TYPE (@0))
4099 && ptrs_compare_unequal (@0, @1))
4100 { constant_boolean_node (neeq != EQ_EXPR, type); })))
4101
4102 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
4103 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
4104 Disable the transform if either operand is a pointer to a function.
4105 This broke pr22051-2.c for arm, where function pointer
4106 canonicalization is not wanted. */
4107
4108 (for cmp (ne eq)
4109 (simplify
4110 (cmp (convert @0) INTEGER_CST@1)
4111 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
4112 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
4113 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4114 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4115 && POINTER_TYPE_P (TREE_TYPE (@1))
4116 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
4117 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
4118 (cmp @0 (convert @1)))))
4119
4120 /* Non-equality compare simplifications from fold_binary */
4121 (for cmp (lt gt le ge)
4122 /* Comparisons with the highest or lowest possible integer of
4123 the specified precision will have known values. */
4124 (simplify
4125 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
4126 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
4127 || POINTER_TYPE_P (TREE_TYPE (@1))
4128 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
4129 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
4130 (with
4131 {
4132 tree cst = uniform_integer_cst_p (@1);
4133 tree arg1_type = TREE_TYPE (cst);
4134 unsigned int prec = TYPE_PRECISION (arg1_type);
4135 wide_int max = wi::max_value (arg1_type);
4136 wide_int signed_max = wi::max_value (prec, SIGNED);
4137 wide_int min = wi::min_value (arg1_type);
4138 }
4139 (switch
4140 (if (wi::to_wide (cst) == max)
4141 (switch
4142 (if (cmp == GT_EXPR)
4143 { constant_boolean_node (false, type); })
4144 (if (cmp == GE_EXPR)
4145 (eq @2 @1))
4146 (if (cmp == LE_EXPR)
4147 { constant_boolean_node (true, type); })
4148 (if (cmp == LT_EXPR)
4149 (ne @2 @1))))
4150 (if (wi::to_wide (cst) == min)
4151 (switch
4152 (if (cmp == LT_EXPR)
4153 { constant_boolean_node (false, type); })
4154 (if (cmp == LE_EXPR)
4155 (eq @2 @1))
4156 (if (cmp == GE_EXPR)
4157 { constant_boolean_node (true, type); })
4158 (if (cmp == GT_EXPR)
4159 (ne @2 @1))))
4160 (if (wi::to_wide (cst) == max - 1)
4161 (switch
4162 (if (cmp == GT_EXPR)
4163 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4164 wide_int_to_tree (TREE_TYPE (cst),
4165 wi::to_wide (cst)
4166 + 1)); }))
4167 (if (cmp == LE_EXPR)
4168 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4169 wide_int_to_tree (TREE_TYPE (cst),
4170 wi::to_wide (cst)
4171 + 1)); }))))
4172 (if (wi::to_wide (cst) == min + 1)
4173 (switch
4174 (if (cmp == GE_EXPR)
4175 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4176 wide_int_to_tree (TREE_TYPE (cst),
4177 wi::to_wide (cst)
4178 - 1)); }))
4179 (if (cmp == LT_EXPR)
4180 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4181 wide_int_to_tree (TREE_TYPE (cst),
4182 wi::to_wide (cst)
4183 - 1)); }))))
4184 (if (wi::to_wide (cst) == signed_max
4185 && TYPE_UNSIGNED (arg1_type)
4186 /* We will flip the signedness of the comparison operator
4187 associated with the mode of @1, so the sign bit is
4188 specified by this mode. Check that @1 is the signed
4189 max associated with this sign bit. */
4190 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
4191 /* signed_type does not work on pointer types. */
4192 && INTEGRAL_TYPE_P (arg1_type))
4193 /* The following case also applies to X < signed_max+1
4194 and X >= signed_max+1 because of previous transformations. */
4195 (if (cmp == LE_EXPR || cmp == GT_EXPR)
4196 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
4197 (switch
4198 (if (cst == @1 && cmp == LE_EXPR)
4199 (ge (convert:st @0) { build_zero_cst (st); }))
4200 (if (cst == @1 && cmp == GT_EXPR)
4201 (lt (convert:st @0) { build_zero_cst (st); }))
4202 (if (cmp == LE_EXPR)
4203 (ge (view_convert:st @0) { build_zero_cst (st); }))
4204 (if (cmp == GT_EXPR)
4205 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
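
/* Examples of the extremal cases above, assuming 32-bit unsigned
   int x: x <= 4294967295u is always true, x > 4294967294u becomes
   x == 4294967295u, and x <= 2147483647u (the signed maximum)
   becomes the sign-bit test (int) x >= 0. */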
4206
4207 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
4208 /* If the second operand is NaN, the result is constant. */
4209 (simplify
4210 (cmp @0 REAL_CST@1)
4211 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4212 && (cmp != LTGT_EXPR || ! flag_trapping_math))
4213 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
4214 ? false : true, type); })))
4215
4216 /* bool_var != 0 becomes bool_var. */
4217 (simplify
4218 (ne @0 integer_zerop)
4219 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4220 && types_match (type, TREE_TYPE (@0)))
4221 (non_lvalue @0)))
4222 /* bool_var == 1 becomes bool_var. */
4223 (simplify
4224 (eq @0 integer_onep)
4225 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4226 && types_match (type, TREE_TYPE (@0)))
4227 (non_lvalue @0)))
4228 /* Do not handle
4229 bool_var == 0 becomes !bool_var or
4230 bool_var != 1 becomes !bool_var
4231 here because that is only good in an assignment context as long
4232 as we require a tcc_comparison in GIMPLE_CONDs, where we'd
4233 replace if (x == 0) with tem = ~x; if (tem != 0), which is
4234 clearly less optimal and which we'll transform again in forwprop. */
4235
4236 /* When one argument is a constant, overflow detection can be simplified.
4237 Currently restricted to single use so as not to interfere too much with
4238 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4239 A + CST CMP A -> A CMP' CST' */
4240 (for cmp (lt le ge gt)
4241 out (gt gt le le)
4242 (simplify
4243 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
4244 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4245 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
4246 && wi::to_wide (@1) != 0
4247 && single_use (@2))
4248 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4249 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4250 wi::max_value (prec, UNSIGNED)
4251 - wi::to_wide (@1)); })))))
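
/* E.g. for 32-bit unsigned int x (an illustrative sketch), the
   overflow check x + 10 < x becomes x > 4294967285u, i.e.
   x > UINT_MAX - 10. */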
4252
4253 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4254 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4255 expects the long form, so we restrict the transformation for now. */
4256 (for cmp (gt le)
4257 (simplify
4258 (cmp:c (minus@2 @0 @1) @0)
4259 (if (single_use (@2)
4260 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4261 && TYPE_UNSIGNED (TREE_TYPE (@0))
4262 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4263 (cmp @1 @0))))
4264
4265 /* Testing for overflow is unnecessary if we already know the result. */
4266 /* A - B > A */
4267 (for cmp (gt le)
4268 out (ne eq)
4269 (simplify
4270 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
4271 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4272 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4273 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4274 /* A + B < A */
4275 (for cmp (lt ge)
4276 out (ne eq)
4277 (simplify
4278 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
4279 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4280 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4281 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4282
4283 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
4284 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
4285 (for cmp (lt ge)
4286 out (ne eq)
4287 (simplify
4288 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
4289 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4290 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4291 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
4292
4293 /* Simplification of math builtins. These rules must all be optimizations
4294 as well as IL simplifications. If there is a possibility that the new
4295 form could be a pessimization, the rule should go in the canonicalization
4296 section that follows this one.
4297
4298 Rules can generally go in this section if they satisfy one of
4299 the following:
4300
4301 - the rule describes an identity
4302
4303 - the rule replaces calls with something as simple as addition or
4304 multiplication
4305
4306 - the rule contains unary calls only and simplifies the surrounding
4307 arithmetic. (The idea here is to exclude non-unary calls in which
4308 one operand is constant and in which the call is known to be cheap
4309 when the operand has that value.) */
4310
4311 (if (flag_unsafe_math_optimizations)
4312 /* Simplify sqrt(x) * sqrt(x) -> x. */
4313 (simplify
4314 (mult (SQRT_ALL@1 @0) @1)
4315 (if (!HONOR_SNANS (type))
4316 @0))
4317
4318 (for op (plus minus)
4319 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
4320 (simplify
4321 (op (rdiv @0 @1)
4322 (rdiv @2 @1))
4323 (rdiv (op @0 @2) @1)))
4324
4325 (for cmp (lt le gt ge)
4326 neg_cmp (gt ge lt le)
4327 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
4328 (simplify
4329 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
4330 (with
4331 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
4332 (if (tem
4333 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
4334 || (real_zerop (tem) && !real_zerop (@1))))
4335 (switch
4336 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
4337 (cmp @0 { tem; }))
4338 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
4339 (neg_cmp @0 { tem; })))))))
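
/* E.g. x * 2.0 < 6.0 becomes x < 3.0, while x * -2.0 < 6.0
   becomes x > -3.0, flipping the comparison for the negative
   multiplier. */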
4340
4341 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
4342 (for root (SQRT CBRT)
4343 (simplify
4344 (mult (root:s @0) (root:s @1))
4345 (root (mult @0 @1))))
4346
4347 /* Simplify expN(x) * expN(y) -> expN(x+y). */
4348 (for exps (EXP EXP2 EXP10 POW10)
4349 (simplify
4350 (mult (exps:s @0) (exps:s @1))
4351 (exps (plus @0 @1))))
4352
4353 /* Simplify a/root(b/c) into a*root(c/b). */
4354 (for root (SQRT CBRT)
4355 (simplify
4356 (rdiv @0 (root:s (rdiv:s @1 @2)))
4357 (mult @0 (root (rdiv @2 @1)))))
4358
4359 /* Simplify x/expN(y) into x*expN(-y). */
4360 (for exps (EXP EXP2 EXP10 POW10)
4361 (simplify
4362 (rdiv @0 (exps:s @1))
4363 (mult @0 (exps (negate @1)))))
4364
4365 (for logs (LOG LOG2 LOG10 LOG10)
4366 exps (EXP EXP2 EXP10 POW10)
4367 /* logN(expN(x)) -> x. */
4368 (simplify
4369 (logs (exps @0))
4370 @0)
4371 /* expN(logN(x)) -> x. */
4372 (simplify
4373 (exps (logs @0))
4374 @0))
4375
4376 /* Optimize logN(func()) for various exponential functions. We
4377 want to determine the value "x" and the power "exponent" in
4378 order to transform logN(x**exponent) into exponent*logN(x). */
4379 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
4380 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
4381 (simplify
4382 (logs (exps @0))
4383 (if (SCALAR_FLOAT_TYPE_P (type))
4384 (with {
4385 tree x;
4386 switch (exps)
4387 {
4388 CASE_CFN_EXP:
4389 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
4390 x = build_real_truncate (type, dconst_e ());
4391 break;
4392 CASE_CFN_EXP2:
4393 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
4394 x = build_real (type, dconst2);
4395 break;
4396 CASE_CFN_EXP10:
4397 CASE_CFN_POW10:
4398 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
4399 {
4400 REAL_VALUE_TYPE dconst10;
4401 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4402 x = build_real (type, dconst10);
4403 }
4404 break;
4405 default:
4406 gcc_unreachable ();
4407 }
4408 }
4409 (mult (logs { x; }) @0)))))
4410
4411 (for logs (LOG LOG
4412 LOG2 LOG2
4413 LOG10 LOG10)
4414 exps (SQRT CBRT)
4415 (simplify
4416 (logs (exps @0))
4417 (if (SCALAR_FLOAT_TYPE_P (type))
4418 (with {
4419 tree x;
4420 switch (exps)
4421 {
4422 CASE_CFN_SQRT:
4423 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
4424 x = build_real (type, dconsthalf);
4425 break;
4426 CASE_CFN_CBRT:
4427 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
4428 x = build_real_truncate (type, dconst_third ());
4429 break;
4430 default:
4431 gcc_unreachable ();
4432 }
4433 }
4434 (mult { x; } (logs @0))))))
4435
4436 /* logN(pow(x,exponent)) -> exponent*logN(x). */
4437 (for logs (LOG LOG2 LOG10)
4438 pows (POW)
4439 (simplify
4440 (logs (pows @0 @1))
4441 (mult @1 (logs @0))))
4442
4443 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4444 or if C is a positive power of 2,
4445 pow(C,x) -> exp2(log2(C)*x). */
4446 #if GIMPLE
4447 (for pows (POW)
4448 exps (EXP)
4449 logs (LOG)
4450 exp2s (EXP2)
4451 log2s (LOG2)
4452 (simplify
4453 (pows REAL_CST@0 @1)
4454 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4455 && real_isfinite (TREE_REAL_CST_PTR (@0))
4456 /* As libmvec doesn't have a vectorized exp2, defer optimizing
4457 the use_exp2 case until after vectorization. It actually seems
4458 beneficial to postpone this for all constants, because
4459 exp(log(C)*x), while faster, has worse precision, and if x
4460 folds into a constant too, the transformation is an
4461 unnecessary pessimization. */
4462 && canonicalize_math_after_vectorization_p ())
4463 (with {
4464 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4465 bool use_exp2 = false;
4466 if (targetm.libc_has_function (function_c99_misc)
4467 && value->cl == rvc_normal)
4468 {
4469 REAL_VALUE_TYPE frac_rvt = *value;
4470 SET_REAL_EXP (&frac_rvt, 1);
4471 if (real_equal (&frac_rvt, &dconst1))
4472 use_exp2 = true;
4473 }
4474 }
4475 (if (!use_exp2)
4476 (if (optimize_pow_to_exp (@0, @1))
4477 (exps (mult (logs @0) @1)))
4478 (exp2s (mult (log2s @0) @1)))))))
4479 #endif
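
/* E.g. pow (2.0, x) has a power-of-two base, so (when the libc
   provides the C99 functions) it becomes exp2 (log2 (2.0) * x),
   which constant-folds further to exp2 (x). */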
4480
4481 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
4482 (for pows (POW)
4483 exps (EXP EXP2 EXP10 POW10)
4484 logs (LOG LOG2 LOG10 LOG10)
4485 (simplify
4486 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4487 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4488 && real_isfinite (TREE_REAL_CST_PTR (@0)))
4489 (exps (plus (mult (logs @0) @1) @2)))))
4490
4491 (for sqrts (SQRT)
4492 cbrts (CBRT)
4493 pows (POW)
4494 exps (EXP EXP2 EXP10 POW10)
4495 /* sqrt(expN(x)) -> expN(x*0.5). */
4496 (simplify
4497 (sqrts (exps @0))
4498 (exps (mult @0 { build_real (type, dconsthalf); })))
4499 /* cbrt(expN(x)) -> expN(x/3). */
4500 (simplify
4501 (cbrts (exps @0))
4502 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4503 /* pow(expN(x), y) -> expN(x*y). */
4504 (simplify
4505 (pows (exps @0) @1)
4506 (exps (mult @0 @1))))
4507
4508 /* tan(atan(x)) -> x. */
4509 (for tans (TAN)
4510 atans (ATAN)
4511 (simplify
4512 (tans (atans @0))
4513 @0)))
4514
4515 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
4516 (for sins (SIN)
4517 atans (ATAN)
4518 sqrts (SQRT)
4519 copysigns (COPYSIGN)
4520 (simplify
4521 (sins (atans:s @0))
4522 (with
4523 {
4524 REAL_VALUE_TYPE r_cst;
4525 build_sinatan_real (&r_cst, type);
4526 tree t_cst = build_real (type, r_cst);
4527 tree t_one = build_one_cst (type);
4528 }
4529 (if (SCALAR_FLOAT_TYPE_P (type))
4530 (cond (lt (abs @0) { t_cst; })
4531 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
4532 (copysigns { t_one; } @0))))))
4533
4534 /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
4535 (for coss (COS)
4536 atans (ATAN)
4537 sqrts (SQRT)
4538 copysigns (COPYSIGN)
4539 (simplify
4540 (coss (atans:s @0))
4541 (with
4542 {
4543 REAL_VALUE_TYPE r_cst;
4544 build_sinatan_real (&r_cst, type);
4545 tree t_cst = build_real (type, r_cst);
4546 tree t_one = build_one_cst (type);
4547 tree t_zero = build_zero_cst (type);
4548 }
4549 (if (SCALAR_FLOAT_TYPE_P (type))
4550 (cond (lt (abs @0) { t_cst; })
4551 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
4552 (copysigns { t_zero; } @0))))))
4553
4554 (if (!flag_errno_math)
4555 /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
4556 (for sinhs (SINH)
4557 atanhs (ATANH)
4558 sqrts (SQRT)
4559 (simplify
4560 (sinhs (atanhs:s @0))
4561 (with { tree t_one = build_one_cst (type); }
4562 (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
4563
4564 /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
4565 (for coshs (COSH)
4566 atanhs (ATANH)
4567 sqrts (SQRT)
4568 (simplify
4569 (coshs (atanhs:s @0))
4570 (with { tree t_one = build_one_cst (type); }
4571 (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
4572
4573 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
4574 (simplify
4575 (CABS (complex:C @0 real_zerop@1))
4576 (abs @0))
4577
4578 /* trunc(trunc(x)) -> trunc(x), etc. */
4579 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4580 (simplify
4581 (fns (fns @0))
4582 (fns @0)))
4583 /* f(x) -> x if x is integer valued and f does nothing for such values. */
4584 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4585 (simplify
4586 (fns integer_valued_real_p@0)
4587 @0))
4588
4589 /* hypot(x,0) and hypot(0,x) -> abs(x). */
4590 (simplify
4591 (HYPOT:c @0 real_zerop@1)
4592 (abs @0))
4593
4594 /* pow(1,x) -> 1. */
4595 (simplify
4596 (POW real_onep@0 @1)
4597 @0)
4598
4599 (simplify
4600 /* copysign(x,x) -> x. */
4601 (COPYSIGN_ALL @0 @0)
4602 @0)
4603
4604 (simplify
4605 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
4606 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
4607 (abs @0))
4608
4609 (for scale (LDEXP SCALBN SCALBLN)
4610 /* ldexp(0, x) -> 0. */
4611 (simplify
4612 (scale real_zerop@0 @1)
4613 @0)
4614 /* ldexp(x, 0) -> x. */
4615 (simplify
4616 (scale @0 integer_zerop@1)
4617 @0)
4618 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
4619 (simplify
4620 (scale REAL_CST@0 @1)
4621 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
4622 @0)))
4623
4624 /* Canonicalization of sequences of math builtins. These rules represent
4625 IL simplifications but are not necessarily optimizations.
4626
4627 The sincos pass is responsible for picking "optimal" implementations
4628 of math builtins, which may be more complicated and can sometimes go
4629 the other way, e.g. converting pow into a sequence of sqrts.
4630 We only want to do these canonicalizations before the pass has run. */
4631
4632 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4633 /* Simplify tan(x) * cos(x) -> sin(x). */
4634 (simplify
4635 (mult:c (TAN:s @0) (COS:s @0))
4636 (SIN @0))
4637
4638 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4639 (simplify
4640 (mult:c @0 (POW:s @0 REAL_CST@1))
4641 (if (!TREE_OVERFLOW (@1))
4642 (POW @0 (plus @1 { build_one_cst (type); }))))
4643
4644 /* Simplify sin(x) / cos(x) -> tan(x). */
4645 (simplify
4646 (rdiv (SIN:s @0) (COS:s @0))
4647 (TAN @0))
4648
4649 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4650 (simplify
4651 (rdiv (COS:s @0) (SIN:s @0))
4652 (rdiv { build_one_cst (type); } (TAN @0)))
4653
4654 /* Simplify sin(x) / tan(x) -> cos(x). */
4655 (simplify
4656 (rdiv (SIN:s @0) (TAN:s @0))
4657 (if (! HONOR_NANS (@0)
4658 && ! HONOR_INFINITIES (@0))
4659 (COS @0)))
4660
4661 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4662 (simplify
4663 (rdiv (TAN:s @0) (SIN:s @0))
4664 (if (! HONOR_NANS (@0)
4665 && ! HONOR_INFINITIES (@0))
4666 (rdiv { build_one_cst (type); } (COS @0))))
4667
4668 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4669 (simplify
4670 (mult (POW:s @0 @1) (POW:s @0 @2))
4671 (POW @0 (plus @1 @2)))
4672
4673 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4674 (simplify
4675 (mult (POW:s @0 @1) (POW:s @2 @1))
4676 (POW (mult @0 @2) @1))
4677
4678 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4679 (simplify
4680 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4681 (POWI (mult @0 @2) @1))
4682
4683 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4684 (simplify
4685 (rdiv (POW:s @0 REAL_CST@1) @0)
4686 (if (!TREE_OVERFLOW (@1))
4687 (POW @0 (minus @1 { build_one_cst (type); }))))
4688
4689 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4690 (simplify
4691 (rdiv @0 (POW:s @1 @2))
4692 (mult @0 (POW @1 (negate @2))))
4693
4694 (for sqrts (SQRT)
4695 cbrts (CBRT)
4696 pows (POW)
4697 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4698 (simplify
4699 (sqrts (sqrts @0))
4700 (pows @0 { build_real (type, dconst_quarter ()); }))
4701 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4702 (simplify
4703 (sqrts (cbrts @0))
4704 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4705 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4706 (simplify
4707 (cbrts (sqrts @0))
4708 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4709 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4710 (simplify
4711 (cbrts (cbrts tree_expr_nonnegative_p@0))
4712 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4713 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4714 (simplify
4715 (sqrts (pows @0 @1))
4716 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4717 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4718 (simplify
4719 (cbrts (pows tree_expr_nonnegative_p@0 @1))
4720 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4721 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4722 (simplify
4723 (pows (sqrts @0) @1)
4724 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4725 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4726 (simplify
4727 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4728 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4729 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4730 (simplify
4731 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4732 (pows @0 (mult @1 @2))))
4733
4734 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4735 (simplify
4736 (CABS (complex @0 @0))
4737 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4738
4739 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4740 (simplify
4741 (HYPOT @0 @0)
4742 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4743
4744 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4745 (for cexps (CEXP)
4746 exps (EXP)
4747 cexpis (CEXPI)
4748 (simplify
4749 (cexps compositional_complex@0)
4750 (if (targetm.libc_has_function (function_c99_math_complex))
4751 (complex
4752 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4753 (mult @1 (imagpart @2)))))))
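/* This uses Euler's formula: cexpi(y) computes cos(y) + i*sin(y),
   so exp(x)*cexpi(y) == exp(x)*cos(y) + i*exp(x)*sin(y) == cexp(x + yi).  */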
4754
4755 (if (canonicalize_math_p ())
4756 /* floor(x) -> trunc(x) if x is nonnegative. */
4757 (for floors (FLOOR_ALL)
4758 truncs (TRUNC_ALL)
4759 (simplify
4760 (floors tree_expr_nonnegative_p@0)
4761 (truncs @0))))
4762
4763 (match double_value_p
4764 @0
4765 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4766 (for froms (BUILT_IN_TRUNCL
4767 BUILT_IN_FLOORL
4768 BUILT_IN_CEILL
4769 BUILT_IN_ROUNDL
4770 BUILT_IN_NEARBYINTL
4771 BUILT_IN_RINTL)
4772 tos (BUILT_IN_TRUNC
4773 BUILT_IN_FLOOR
4774 BUILT_IN_CEIL
4775 BUILT_IN_ROUND
4776 BUILT_IN_NEARBYINT
4777 BUILT_IN_RINT)
4778 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4779 (if (optimize && canonicalize_math_p ())
4780 (simplify
4781 (froms (convert double_value_p@0))
4782 (convert (tos @0)))))
4783
4784 (match float_value_p
4785 @0
4786 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4787 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4788 BUILT_IN_FLOORL BUILT_IN_FLOOR
4789 BUILT_IN_CEILL BUILT_IN_CEIL
4790 BUILT_IN_ROUNDL BUILT_IN_ROUND
4791 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4792 BUILT_IN_RINTL BUILT_IN_RINT)
4793 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4794 BUILT_IN_FLOORF BUILT_IN_FLOORF
4795 BUILT_IN_CEILF BUILT_IN_CEILF
4796 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4797 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4798 BUILT_IN_RINTF BUILT_IN_RINTF)
4799 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
4800 if x is a float. */
4801 (if (optimize && canonicalize_math_p ()
4802 && targetm.libc_has_function (function_c99_misc))
4803 (simplify
4804 (froms (convert float_value_p@0))
4805 (convert (tos @0)))))
4806
4807 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
4808 tos (XFLOOR XCEIL XROUND XRINT)
4809 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4810 (if (optimize && canonicalize_math_p ())
4811 (simplify
4812 (froms (convert double_value_p@0))
4813 (tos @0))))
4814
4815 (for froms (XFLOORL XCEILL XROUNDL XRINTL
4816 XFLOOR XCEIL XROUND XRINT)
4817 tos (XFLOORF XCEILF XROUNDF XRINTF)
4818 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
4819 if x is a float. */
4820 (if (optimize && canonicalize_math_p ())
4821 (simplify
4822 (froms (convert float_value_p@0))
4823 (tos @0))))
4824
4825 (if (canonicalize_math_p ())
4826 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4827 (for floors (IFLOOR LFLOOR LLFLOOR)
4828 (simplify
4829 (floors tree_expr_nonnegative_p@0)
4830 (fix_trunc @0))))
4831
4832 (if (canonicalize_math_p ())
4833 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4834 (for fns (IFLOOR LFLOOR LLFLOOR
4835 ICEIL LCEIL LLCEIL
4836 IROUND LROUND LLROUND)
4837 (simplify
4838 (fns integer_valued_real_p@0)
4839 (fix_trunc @0)))
4840 (if (!flag_errno_math)
4841 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4842 (for rints (IRINT LRINT LLRINT)
4843 (simplify
4844 (rints integer_valued_real_p@0)
4845 (fix_trunc @0)))))
4846
4847 (if (canonicalize_math_p ())
4848 (for ifn (IFLOOR ICEIL IROUND IRINT)
4849 lfn (LFLOOR LCEIL LROUND LRINT)
4850 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4851 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4852 sizeof (int) == sizeof (long). */
4853 (if (TYPE_PRECISION (integer_type_node)
4854 == TYPE_PRECISION (long_integer_type_node))
4855 (simplify
4856 (ifn @0)
4857 (lfn:long_integer_type_node @0)))
4858 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4859 sizeof (long long) == sizeof (long). */
4860 (if (TYPE_PRECISION (long_long_integer_type_node)
4861 == TYPE_PRECISION (long_integer_type_node))
4862 (simplify
4863 (llfn @0)
4864 (lfn:long_integer_type_node @0)))))
4865
4866 /* cproj(x) -> x if we're ignoring infinities. */
4867 (simplify
4868 (CPROJ @0)
4869 (if (!HONOR_INFINITIES (type))
4870 @0))
4871
4872 /* If the real part is inf and the imag part is known to be
4873 nonnegative, return (inf + 0i). */
4874 (simplify
4875 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
4876 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
4877 { build_complex_inf (type, false); }))
4878
4879 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
4880 (simplify
4881 (CPROJ (complex @0 REAL_CST@1))
4882 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
4883 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4884
4885 (for pows (POW)
4886 sqrts (SQRT)
4887 cbrts (CBRT)
4888 (simplify
4889 (pows @0 REAL_CST@1)
4890 (with {
4891 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
4892 REAL_VALUE_TYPE tmp;
4893 }
4894 (switch
4895 /* pow(x,0) -> 1. */
4896 (if (real_equal (value, &dconst0))
4897 { build_real (type, dconst1); })
4898 /* pow(x,1) -> x. */
4899 (if (real_equal (value, &dconst1))
4900 @0)
4901 /* pow(x,-1) -> 1/x. */
4902 (if (real_equal (value, &dconstm1))
4903 (rdiv { build_real (type, dconst1); } @0))
4904 /* pow(x,0.5) -> sqrt(x). */
4905 (if (flag_unsafe_math_optimizations
4906 && canonicalize_math_p ()
4907 && real_equal (value, &dconsthalf))
4908 (sqrts @0))
4909 /* pow(x,1/3) -> cbrt(x). */
4910 (if (flag_unsafe_math_optimizations
4911 && canonicalize_math_p ()
4912 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4913 real_equal (value, &tmp)))
4914 (cbrts @0))))))
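/* Note pow(x,0.5) -> sqrt(x) sits behind flag_unsafe_math_optimizations
   because the two can differ at the edges; e.g. under Annex F,
   pow(-Inf, 0.5) is +Inf while sqrt(-Inf) is a NaN.  */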
4915
4916 /* powi(1,x) -> 1. */
4917 (simplify
4918 (POWI real_onep@0 @1)
4919 @0)
4920
4921 (simplify
4922 (POWI @0 INTEGER_CST@1)
4923 (switch
4924 /* powi(x,0) -> 1. */
4925 (if (wi::to_wide (@1) == 0)
4926 { build_real (type, dconst1); })
4927 /* powi(x,1) -> x. */
4928 (if (wi::to_wide (@1) == 1)
4929 @0)
4930 /* powi(x,-1) -> 1/x. */
4931 (if (wi::to_wide (@1) == -1)
4932 (rdiv { build_real (type, dconst1); } @0))))
4933
4934 /* Narrowing of arithmetic and logical operations.
4935
4936 These are conceptually similar to the transformations performed for
4937 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4938 term we want to move all that code out of the front-ends into here. */
4939
4940 /* If we have a narrowing conversion of an arithmetic operation where
4941 both operands are widening conversions from the same type as the outer
4942 narrowing conversion, convert the innermost operands to a suitable
4943 unsigned type (to avoid introducing undefined behavior), perform the
4944 operation and convert the result to the desired type. */
4945 (for op (plus minus)
4946 (simplify
4947 (convert (op:s (convert@2 @0) (convert?@3 @1)))
4948 (if (INTEGRAL_TYPE_P (type)
4949 /* We check for type compatibility between @0 and @1 below,
4950 so there's no need to check that @1/@3 are integral types. */
4951 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4952 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4953 /* The precision of the type of each operand must match the
4954 precision of the mode of each operand, similarly for the
4955 result. */
4956 && type_has_mode_precision_p (TREE_TYPE (@0))
4957 && type_has_mode_precision_p (TREE_TYPE (@1))
4958 && type_has_mode_precision_p (type)
4959 /* The inner conversion must be a widening conversion. */
4960 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4961 && types_match (@0, type)
4962 && (types_match (@0, @1)
4963 /* Or the second operand is a constant integer, or a constant
4964 integer converted during valueization. */
4965 || TREE_CODE (@1) == INTEGER_CST))
4966 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4967 (op @0 (convert @1))
4968 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4969 (convert (op (convert:utype @0)
4970 (convert:utype @1))))))))
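/* Sketch of the effect, assuming 16-bit short and 32-bit int:

     short r = (short) ((int) a + (int) b);   // a, b of type short

   is rewritten along the lines of

     short r = (short) ((unsigned short) a + (unsigned short) b);

   with the addition performed directly in the narrow unsigned type
   (at the GIMPLE level, where no integer promotion re-widens the
   operands), so it wraps instead of invoking signed-overflow UB.  */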
4971
4972 /* This is another case of narrowing, specifically when there's an outer
4973 BIT_AND_EXPR which masks off bits outside the type of the innermost
4974 operands. Like the previous case we have to convert the operands
4975 to unsigned types to avoid introducing undefined behavior for the
4976 arithmetic operation. */
4977 (for op (minus plus)
4978 (simplify
4979 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4980 (if (INTEGRAL_TYPE_P (type)
4981 /* We check for type compatibility between @0 and @1 below,
4982 so there's no need to check that @1/@3 are integral types. */
4983 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4984 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4985 /* The precision of the type of each operand must match the
4986 precision of the mode of each operand, similarly for the
4987 result. */
4988 && type_has_mode_precision_p (TREE_TYPE (@0))
4989 && type_has_mode_precision_p (TREE_TYPE (@1))
4990 && type_has_mode_precision_p (type)
4991 /* The inner conversion must be a widening conversion. */
4992 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4993 && types_match (@0, @1)
4994 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4995 <= TYPE_PRECISION (TREE_TYPE (@0)))
4996 && (wi::to_wide (@4)
4997 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4998 true, TYPE_PRECISION (type))) == 0)
4999 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
5000 (with { tree ntype = TREE_TYPE (@0); }
5001 (convert (bit_and (op @0 @1) (convert:ntype @4))))
5002 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
5003 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
5004 (convert:utype @4))))))))
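/* E.g. with 32-bit int a, b and 64-bit long, the expression
     ((long) a - (long) b) & 0xff
   can instead subtract and mask in (unsigned) int and widen the
   result, since the mask discards every bit beyond the narrow type.  */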
5005
5006 /* Transform (@0 < @1 and @0 < @2) to use min and
5007 (@0 > @1 and @0 > @2) to use max; for "or" the roles of min and max swap. */
5008 (for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
5009 op (lt le gt ge lt le gt ge )
5010 ext (min min max max max max min min )
5011 (simplify
5012 (logic (op:cs @0 @1) (op:cs @0 @2))
5013 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5014 && TREE_CODE (@0) != INTEGER_CST)
5015 (op @0 (ext @1 @2)))))
5016
5017 (simplify
5018 /* signbit(x) -> 0 if x is nonnegative. */
5019 (SIGNBIT tree_expr_nonnegative_p@0)
5020 { integer_zero_node; })
5021
5022 (simplify
5023 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
5024 (SIGNBIT @0)
5025 (if (!HONOR_SIGNED_ZEROS (@0))
5026 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
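/* The HONOR_SIGNED_ZEROS check above is needed because signbit(-0.0)
   is nonzero while -0.0 < 0.0 is false, so x < 0 only replaces
   signbit(x) faithfully when signed zeros can be ignored.  */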
5027
5028 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
5029 (for cmp (eq ne)
5030 (for op (plus minus)
5031 rop (minus plus)
5032 (simplify
5033 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5034 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5035 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
5036 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
5037 && !TYPE_SATURATING (TREE_TYPE (@0)))
5038 (with { tree res = int_const_binop (rop, @2, @1); }
5039 (if (TREE_OVERFLOW (res)
5040 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5041 { constant_boolean_node (cmp == NE_EXPR, type); }
5042 (if (single_use (@3))
5043 (cmp @0 { TREE_OVERFLOW (res)
5044 ? drop_tree_overflow (res) : res; }))))))))
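/* Worked instance of the eq/ne case: x + 10 == 5 becomes x == -5 via
   res = 5 - 10.  If computing C2 -+ C1 overflows and signed overflow
   is undefined, no value of X can satisfy the comparison, so it folds
   outright to false for == (true for !=).  */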
5045 (for cmp (lt le gt ge)
5046 (for op (plus minus)
5047 rop (minus plus)
5048 (simplify
5049 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5050 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5051 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5052 (with { tree res = int_const_binop (rop, @2, @1); }
5053 (if (TREE_OVERFLOW (res))
5054 {
5055 fold_overflow_warning (("assuming signed overflow does not occur "
5056 "when simplifying conditional to constant"),
5057 WARN_STRICT_OVERFLOW_CONDITIONAL);
5058 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
5059 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
5060 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
5061 TYPE_SIGN (TREE_TYPE (@1)))
5062 != (op == MINUS_EXPR);
5063 constant_boolean_node (less == ovf_high, type);
5064 }
5065 (if (single_use (@3))
5066 (with
5067 {
5068 fold_overflow_warning (("assuming signed overflow does not occur "
5069 "when changing X +- C1 cmp C2 to "
5070 "X cmp C2 -+ C1"),
5071 WARN_STRICT_OVERFLOW_COMPARISON);
5072 }
5073 (cmp @0 { res; })))))))))
5074
5075 /* Canonicalizations of BIT_FIELD_REFs. */
5076
5077 (simplify
5078 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
5079 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
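/* E.g. extracting 8 bits at offset 8 from a 32-bit extraction that
   itself started at offset 32 of @0 reads bits [40, 48) of @0; the
   two offsets simply add.  */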
5080
5081 (simplify
5082 (BIT_FIELD_REF (view_convert @0) @1 @2)
5083 (BIT_FIELD_REF @0 @1 @2))
5084
5085 (simplify
5086 (BIT_FIELD_REF @0 @1 integer_zerop)
5087 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
5088 (view_convert @0)))
5089
5090 (simplify
5091 (BIT_FIELD_REF @0 @1 @2)
5092 (switch
5093 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
5094 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5095 (switch
5096 (if (integer_zerop (@2))
5097 (view_convert (realpart @0)))
5098 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5099 (view_convert (imagpart @0)))))
5100 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5101 && INTEGRAL_TYPE_P (type)
5102 /* On GIMPLE this should only apply to register arguments. */
5103 && (! GIMPLE || is_gimple_reg (@0))
5104 /* A bit-field-ref that referenced the full argument can be stripped. */
5105 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
5106 && integer_zerop (@2))
5107 /* Low-parts can be reduced to integral conversions.
5108 ??? The following doesn't work for PDP endian. */
5109 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
5110 /* Don't even think about BITS_BIG_ENDIAN. */
5111 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
5112 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
5113 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
5114 ? (TYPE_PRECISION (TREE_TYPE (@0))
5115 - TYPE_PRECISION (type))
5116 : 0)) == 0)))
5117 (convert @0))))
5118
5119 /* Simplify vector extracts. */
5120
5121 (simplify
5122 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
5123 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
5124 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
5125 || (VECTOR_TYPE_P (type)
5126 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
5127 (with
5128 {
5129 tree ctor = (TREE_CODE (@0) == SSA_NAME
5130 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
5131 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
5132 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
5133 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
5134 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
5135 }
5136 (if (n != 0
5137 && (idx % width) == 0
5138 && (n % width) == 0
5139 && known_le ((idx + n) / width,
5140 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
5141 (with
5142 {
5143 idx = idx / width;
5144 n = n / width;
5145 /* Constructor elements can be subvectors. */
5146 poly_uint64 k = 1;
5147 if (CONSTRUCTOR_NELTS (ctor) != 0)
5148 {
5149 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
5150 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
5151 k = TYPE_VECTOR_SUBPARTS (cons_elem);
5152 }
5153 unsigned HOST_WIDE_INT elt, count, const_k;
5154 }
5155 (switch
5156 /* We keep an exact subset of the constructor elements. */
5157 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
5158 (if (CONSTRUCTOR_NELTS (ctor) == 0)
5159 { build_constructor (type, NULL); }
5160 (if (count == 1)
5161 (if (elt < CONSTRUCTOR_NELTS (ctor))
5162 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
5163 { build_zero_cst (type); })
5164 {
5165 vec<constructor_elt, va_gc> *vals;
5166 vec_alloc (vals, count);
5167 for (unsigned i = 0;
5168 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
5169 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
5170 CONSTRUCTOR_ELT (ctor, elt + i)->value);
5171 build_constructor (type, vals);
5172 })))
5173 /* The bitfield references a single constructor element. */
5174 (if (k.is_constant (&const_k)
5175 && idx + n <= (idx / const_k + 1) * const_k)
5176 (switch
5177 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
5178 { build_zero_cst (type); })
5179 (if (n == const_k)
5180 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
5181 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
5182 @1 { bitsize_int ((idx % const_k) * width); })))))))))
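/* E.g. for a four-element vector CONSTRUCTOR { a, b, c, d } with
   matching element type, extracting element 2 yields c directly,
   and extracting the upper half yields the constructor { c, d }.  */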
5183
5184 /* Simplify a bit extraction from a bit insertion for the cases where
5185 the inserted element fully covers the extraction or where the insertion
5186 does not touch the extraction. */
5187 (simplify
5188 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
5189 (with
5190 {
5191 unsigned HOST_WIDE_INT isize;
5192 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
5193 isize = TYPE_PRECISION (TREE_TYPE (@1));
5194 else
5195 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
5196 }
5197 (switch
5198 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
5199 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
5200 wi::to_wide (@ipos) + isize))
5201 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
5202 wi::to_wide (@rpos)
5203 - wi::to_wide (@ipos)); }))
5204 (if (wi::geu_p (wi::to_wide (@ipos),
5205 wi::to_wide (@rpos) + wi::to_wide (@rsize))
5206 || wi::geu_p (wi::to_wide (@rpos),
5207 wi::to_wide (@ipos) + isize))
5208 (BIT_FIELD_REF @0 @rsize @rpos)))))
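/* E.g. after inserting a 32-bit value v at bit position 32 of x,
   extracting bits [32, 64) yields v itself (full cover) and
   extracting bits [0, 32) reads the original bits of x (no overlap);
   partial overlaps are left alone.  */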
5209
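/* For the FMA folds below, the internal functions compute
     IFN_FMA (a, b, c)  = a * b + c    IFN_FMS (a, b, c)  = a * b - c
     IFN_FNMA (a, b, c) = -a * b + c   IFN_FNMS (a, b, c) = -a * b - c
   so e.g. negate (FMA (a, b, c)) == -(a*b + c) == FNMS (a, b, c).  */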
5210 (if (canonicalize_math_after_vectorization_p ())
5211 (for fmas (FMA)
5212 (simplify
5213 (fmas:c (negate @0) @1 @2)
5214 (IFN_FNMA @0 @1 @2))
5215 (simplify
5216 (fmas @0 @1 (negate @2))
5217 (IFN_FMS @0 @1 @2))
5218 (simplify
5219 (fmas:c (negate @0) @1 (negate @2))
5220 (IFN_FNMS @0 @1 @2))
5221 (simplify
5222 (negate (fmas@3 @0 @1 @2))
5223 (if (single_use (@3))
5224 (IFN_FNMS @0 @1 @2))))
5225
5226 (simplify
5227 (IFN_FMS:c (negate @0) @1 @2)
5228 (IFN_FNMS @0 @1 @2))
5229 (simplify
5230 (IFN_FMS @0 @1 (negate @2))
5231 (IFN_FMA @0 @1 @2))
5232 (simplify
5233 (IFN_FMS:c (negate @0) @1 (negate @2))
5234 (IFN_FNMA @0 @1 @2))
5235 (simplify
5236 (negate (IFN_FMS@3 @0 @1 @2))
5237 (if (single_use (@3))
5238 (IFN_FNMA @0 @1 @2)))
5239
5240 (simplify
5241 (IFN_FNMA:c (negate @0) @1 @2)
5242 (IFN_FMA @0 @1 @2))
5243 (simplify
5244 (IFN_FNMA @0 @1 (negate @2))
5245 (IFN_FNMS @0 @1 @2))
5246 (simplify
5247 (IFN_FNMA:c (negate @0) @1 (negate @2))
5248 (IFN_FMS @0 @1 @2))
5249 (simplify
5250 (negate (IFN_FNMA@3 @0 @1 @2))
5251 (if (single_use (@3))
5252 (IFN_FMS @0 @1 @2)))
5253
5254 (simplify
5255 (IFN_FNMS:c (negate @0) @1 @2)
5256 (IFN_FMS @0 @1 @2))
5257 (simplify
5258 (IFN_FNMS @0 @1 (negate @2))
5259 (IFN_FNMA @0 @1 @2))
5260 (simplify
5261 (IFN_FNMS:c (negate @0) @1 (negate @2))
5262 (IFN_FMA @0 @1 @2))
5263 (simplify
5264 (negate (IFN_FNMS@3 @0 @1 @2))
5265 (if (single_use (@3))
5266 (IFN_FMA @0 @1 @2))))
5267
5268 /* POPCOUNT simplifications. */
5269 (for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
5270 BUILT_IN_POPCOUNTIMAX)
5271 /* popcount(X&1) is nop_expr(X&1); more generally, popcount(X) -> (convert) X when only the low bit of X can be set. */
5272 (simplify
5273 (popcount @0)
5274 (if (tree_nonzero_bits (@0) == 1)
5275 (convert @0)))
5276 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
5277 (simplify
5278 (plus (popcount:s @0) (popcount:s @1))
5279 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
5280 (popcount (bit_ior @0 @1))))
5281 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
5282 (for cmp (le eq ne gt)
5283 rep (eq eq ne ne)
5284 (simplify
5285 (cmp (popcount @0) integer_zerop)
5286 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
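/* E.g. popcount (x & 0xf0) + popcount (x & 0x0f) becomes
   popcount (x & 0xff), the two masks having no nonzero bit in common.  */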
5287
5288 /* Simplify:
5289
5290 a = a1 op a2
5291 r = c ? a : b;
5292
5293 to:
5294
5295 r = c ? a1 op a2 : b;
5296
5297 if the target can do it in one go. This makes the operation conditional
5298 on c, so could drop potentially-trapping arithmetic, but that's a valid
5299 simplification if the result of the operation isn't needed.
5300
5301 Avoid speculatively generating a stand-alone vector comparison
5302 on targets that might not support them. Any target implementing
5303 conditional internal functions must support the same comparisons
5304 inside and outside a VEC_COND_EXPR. */
5305
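/* Concretely, with IFN_COND_ADD (c, a1, a2, b) computing
   c ? a1 + a2 : b, the sequence a = a1 + a2; r = c ? a : b
   collapses to r = IFN_COND_ADD (c, a1, a2, b).  */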
5306 #if GIMPLE
5307 (for uncond_op (UNCOND_BINARY)
5308 cond_op (COND_BINARY)
5309 (simplify
5310 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
5311 (with { tree op_type = TREE_TYPE (@4); }
5312 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5313 && element_precision (type) == element_precision (op_type))
5314 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
5315 (simplify
5316 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
5317 (with { tree op_type = TREE_TYPE (@4); }
5318 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5319 && element_precision (type) == element_precision (op_type))
5320 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
5321
5322 /* Same for ternary operations. */
5323 (for uncond_op (UNCOND_TERNARY)
5324 cond_op (COND_TERNARY)
5325 (simplify
5326 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
5327 (with { tree op_type = TREE_TYPE (@5); }
5328 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5329 && element_precision (type) == element_precision (op_type))
5330 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
5331 (simplify
5332 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
5333 (with { tree op_type = TREE_TYPE (@5); }
5334 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5335 && element_precision (type) == element_precision (op_type))
5336 (view_convert (cond_op (bit_not @0) @2 @3 @4
5337 (view_convert:op_type @1)))))))
5338 #endif
5339
5340 /* Detect cases in which a VEC_COND_EXPR effectively replaces the
5341 "else" value of an IFN_COND_*. */
5342 (for cond_op (COND_BINARY)
5343 (simplify
5344 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
5345 (with { tree op_type = TREE_TYPE (@3); }
5346 (if (element_precision (type) == element_precision (op_type))
5347 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
5348 (simplify
5349 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
5350 (with { tree op_type = TREE_TYPE (@5); }
5351 (if (inverse_conditions_p (@0, @2)
5352 && element_precision (type) == element_precision (op_type))
5353 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
5354
5355 /* Same for ternary operations. */
5356 (for cond_op (COND_TERNARY)
5357 (simplify
5358 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
5359 (with { tree op_type = TREE_TYPE (@4); }
5360 (if (element_precision (type) == element_precision (op_type))
5361 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
5362 (simplify
5363 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
5364 (with { tree op_type = TREE_TYPE (@6); }
5365 (if (inverse_conditions_p (@0, @2)
5366 && element_precision (type) == element_precision (op_type))
5367 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
5368
5369 /* For pointers @0 and @2 and nonnegative constant offset @1, look for
5370 expressions like:
5371
5372 A: (@0 + @1 < @2) | (@2 + @1 < @0)
5373 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
5374
5375 If pointers are known not to wrap, B checks whether @1 bytes starting
5376 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
5377 bytes. A is more efficiently tested as:
5378
5379 A: (sizetype) (@0 + @1 - @2) > @1 * 2
5380
5381 The equivalent expression for B is given by replacing @1 with @1 - 1:
5382
5383 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
5384
5385 @0 and @2 can be swapped in both expressions without changing the result.
5386
5387 The folds rely on sizetype's being unsigned (which is always true)
5388 and on its being the same width as the pointer (which we have to check).
5389
5390 The fold replaces two pointer_plus expressions, two comparisons and
5391 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
5392 the best case it's a saving of two operations. The A fold retains one
5393 of the original pointer_pluses, so is a win even if both pointer_pluses
5394 are used elsewhere. The B fold is a wash if both pointer_pluses are
5395 used elsewhere, since all we end up doing is replacing a comparison with
5396 a pointer_plus. We do still apply the fold under those circumstances
5397 though, in case applying it to other conditions eventually makes one of the
5398 pointer_pluses dead. */
5399 (for ior (truth_orif truth_or bit_ior)
5400 (for cmp (le lt)
5401 (simplify
5402 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
5403 (cmp:cs (pointer_plus@4 @2 @1) @0))
5404 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5405 && TYPE_OVERFLOW_WRAPS (sizetype)
5406 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
5407 /* Calculate the rhs constant. */
5408 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
5409 offset_int rhs = off * 2; }
5410 /* Always fails for negative values, so the fold only applies when the offset @1 is nonnegative. */
5411 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
5412 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
5413 pick a canonical order. This increases the chances of using the
5414 same pointer_plus in multiple checks. */
5415 (with { bool swap_p = tree_swap_operands_p (@0, @2);
5416 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
5417 (if (cmp == LT_EXPR)
5418 (gt (convert:sizetype
5419 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
5420 { swap_p ? @0 : @2; }))
5421 { rhs_tree; })
5422 (gt (convert:sizetype
5423 (pointer_diff:ssizetype
5424 (pointer_plus { swap_p ? @2 : @0; }
5425 { wide_int_to_tree (sizetype, off); })
5426 { swap_p ? @0 : @2; }))
5427 { rhs_tree; })))))))))
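/* Worked instance of the A form with @1 == 16:
     (p + 16 < q) | (q + 16 < p)
   becomes
     (sizetype) (p + 16 - q) > 32
   i.e. one pointer_plus, one pointer_diff and one comparison.  */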
5428
5429 /* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
5430 element of @1. */
5431 (for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
5432 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
5433 (with { int i = single_nonzero_element (@1); }
5434 (if (i >= 0)
5435 (with { tree elt = vector_cst_elt (@1, i);
5436 tree elt_type = TREE_TYPE (elt);
5437 unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
5438 tree size = bitsize_int (elt_bits);
5439 tree pos = bitsize_int (elt_bits * i); }
5440 (view_convert
5441 (bit_and:elt_type
5442 (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
5443 { elt; })))))))
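/* E.g. IFN_REDUC_PLUS (v & { 0, 0, 4, 0 }) -> v[2] & 4: with a
   single nonzero mask element, reducing the masked vector leaves
   only the one surviving lane.  */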
5444
5445 (simplify
5446 (vec_perm @0 @1 VECTOR_CST@2)
5447 (with
5448 {
5449 tree op0 = @0, op1 = @1, op2 = @2;
5450
5451 /* Build a vector of integers from the tree mask. */
5452 vec_perm_builder builder;
5453 if (!tree_to_vec_perm_builder (&builder, op2))
5454 return NULL_TREE;
5455
5456 /* Create a vec_perm_indices for the integer vector. */
5457 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
5458 bool single_arg = (op0 == op1);
5459 vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
5460 }
5461 (if (sel.series_p (0, 1, 0, 1))
5462 { op0; }
5463 (if (sel.series_p (0, 1, nelts, 1))
5464 { op1; }
5465 (with
5466 {
5467 if (!single_arg)
5468 {
5469 if (sel.all_from_input_p (0))
5470 op1 = op0;
5471 else if (sel.all_from_input_p (1))
5472 {
5473 op0 = op1;
5474 sel.rotate_inputs (1);
5475 }
5476 else if (known_ge (poly_uint64 (sel[0]), nelts))
5477 {
5478 std::swap (op0, op1);
5479 sel.rotate_inputs (1);
5480 }
5481 }
5482 gassign *def;
5483 tree cop0 = op0, cop1 = op1;
5484 if (TREE_CODE (op0) == SSA_NAME
5485 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
5486 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
5487 cop0 = gimple_assign_rhs1 (def);
5488 if (TREE_CODE (op1) == SSA_NAME
5489 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
5490 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
5491 cop1 = gimple_assign_rhs1 (def);
5492
5493 tree t;
5494 }
5495 (if ((TREE_CODE (cop0) == VECTOR_CST
5496 || TREE_CODE (cop0) == CONSTRUCTOR)
5497 && (TREE_CODE (cop1) == VECTOR_CST
5498 || TREE_CODE (cop1) == CONSTRUCTOR)
5499 && (t = fold_vec_perm (type, cop0, cop1, sel)))
5500 { t; }
5501 (with
5502 {
5503 bool changed = (op0 == op1 && !single_arg);
5504 tree ins = NULL_TREE;
5505 unsigned at = 0;
5506
5507 /* See if the permutation is performing a single element
5508 insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
5509 in that case. But only if the vector mode is supported,
5510 otherwise this is invalid GIMPLE. */
5511 if (TYPE_MODE (type) != BLKmode
5512 && (TREE_CODE (cop0) == VECTOR_CST
5513 || TREE_CODE (cop0) == CONSTRUCTOR
5514 || TREE_CODE (cop1) == VECTOR_CST
5515 || TREE_CODE (cop1) == CONSTRUCTOR))
5516 {
5517 if (sel.series_p (1, 1, nelts + 1, 1))
5518 {
5519 /* After canonicalizing the first elt to come from the
5520 first vector we can only insert the first elt from
5521 the first vector. */
5522 at = 0;
5523 if ((ins = fold_read_from_vector (cop0, 0)))
5524 op0 = op1;
5525 }
5526 else
5527 {
5528 unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
5529 for (at = 0; at < encoded_nelts; ++at)
5530 if (maybe_ne (sel[at], at))
5531 break;
5532 if (at < encoded_nelts && sel.series_p (at + 1, 1, at + 1, 1))
5533 {
5534 if (known_lt (at, nelts))
5535 ins = fold_read_from_vector (cop0, sel[at]);
5536 else
5537 ins = fold_read_from_vector (cop1, sel[at] - nelts);
5538 }
5539 }
5540 }
5541
5542 /* Generate a canonical form of the selector. */
5543 if (!ins && sel.encoding () != builder)
5544 {
5545 /* Some targets are deficient and fail to expand a single
5546 argument permutation while still allowing an equivalent
5547 2-argument version. */
5548 tree oldop2 = op2;
5549 if (sel.ninputs () == 2
5550 || can_vec_perm_const_p (TYPE_MODE (type), sel, false))
5551 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
5552 else
5553 {
5554 vec_perm_indices sel2 (builder, 2, nelts);
5555 if (can_vec_perm_const_p (TYPE_MODE (type), sel2, false))
5556 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
5557 else
5558 /* Not directly supported with either encoding,
5559 so use the preferred form. */
5560 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
5561 }
5562 if (!operand_equal_p (op2, oldop2, 0))
5563 changed = true;
5564 }
5565 }
5566 (if (ins)
5567 (bit_insert { op0; } { ins; }
5568 { bitsize_int (at * tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)))); })
5569 (if (changed)
5570 (vec_perm { op0; } { op1; } { op2; }))))))))))
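/* E.g. for four-element vectors a selector of { 0, 1, 2, 3 } returns
   op0 unchanged and { 4, 5, 6, 7 } returns op1; constant selectors
   applied to constant operands fold outright, and single-element
   insertions become a BIT_INSERT_EXPR.  */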