/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2019 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   uniform_integer_cst_p
   HONOR_NANS)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
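
/* For example, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) above defines XFLOORF
   as the list BUILT_IN_IFLOORF BUILT_IN_LFLOORF BUILT_IN_LLFLOORF, XFLOOR
   as BUILT_IN_IFLOOR BUILT_IN_LFLOOR BUILT_IN_LLFLOOR, and XFLOORL as
   BUILT_IN_IFLOORL BUILT_IN_LFLOORL BUILT_IN_LLFLOORL.  */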

/* Binary operations and their associated IFN_COND_* function.  */
(define_operator_list UNCOND_BINARY
  plus minus
  mult trunc_div trunc_mod rdiv
  min max
  bit_and bit_ior bit_xor)
(define_operator_list COND_BINARY
  IFN_COND_ADD IFN_COND_SUB
  IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
  IFN_COND_MIN IFN_COND_MAX
  IFN_COND_AND IFN_COND_IOR IFN_COND_XOR)

/* Same for ternary operations.  */
(define_operator_list UNCOND_TERNARY
  IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
(define_operator_list COND_TERNARY
  IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)

/* As opposed to convert?, this still creates a single pattern, so
   it is not a suitable replacement for convert? in all cases.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
                   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others.  */
(match (nop_convert @0)
 @0)

/* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
   ABSU_EXPR returns the unsigned absolute value of its operand, and the
   operand of the ABSU_EXPR has the corresponding signed type.  */
(simplify (abs (convert @0))
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && element_precision (type) > element_precision (TREE_TYPE (@0)))
  (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
   (convert (absu:utype @0)))))


/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
 (simplify
  (op @0 integer_zerop)
  (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))
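
/* For example, with IEEE semantics -3.0 * 0.0 is -0.0 rather than 0.0, and
   NaN * 0.0 is NaN, so the product can only be replaced by the zero
   constant when neither NaNs nor signed zeros are honored.  */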

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 (simplify
  (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outp @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outp @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outp @0))))))
 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 (simplify
  (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outn @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outn @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outn @0)))))))

/* Transform X * copysign (1.0, X) into abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep @0))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
(simplify
 (COPYSIGN_ALL REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN_ALL (negate @0) @1)))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (op @0 integer_onep)
  (non_lvalue @0)))

/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / ( 1 << B)) != -1 >> B.  */
(simplify
 (trunc_div @0 (lshift integer_onep@1 @2))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
  (rshift @0 @2)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
         { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    wi::overflow_type overflow;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                            TYPE_SIGN (type), &overflow);
   }
   (if (div == EXACT_DIV_EXPR
        || optimize_successive_divisions_p (@2, @3))
    (if (!overflow)
     (div @0 { wide_int_to_tree (type, mul); })
     (if (TYPE_UNSIGNED (type)
          || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
      { build_zero_cst (type); }))))))
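
/* For example, (X / 4) / 8 becomes X / 32.  When C1 * C2 overflows, the
   combined quotient is known to be zero: for unsigned 32-bit X,
   (X / 0x10000) / 0x10000 is always 0 because the inner quotient is at
   most 0xffff.  */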

/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   wi::overflow_type overflow;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                           TYPE_SIGN (type), &overflow);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
 (if (SCALAR_FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  (switch
   (if (types_match (type, float_type_node))
    (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
   (if (types_match (type, double_type_node))
    (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
   (if (types_match (type, long_double_type_node))
    (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C).  */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

(if (flag_unsafe_math_optimizations)
 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
    Since C / x may underflow to zero, do this only for unsafe math.  */
 (for op (lt le gt ge)
      neg_op (gt ge lt le)
  (simplify
   (op (rdiv REAL_CST@0 @1) real_zerop@2)
   (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
    (switch
     (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
      (op @1 @2))
     /* For C < 0, use the inverted operator.  */
     (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
      (neg_op @1 @2)))))))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
           { build_int_cst (integer_type_node,
                            wi::exact_log2 (wi::to_wide (@2))); }))))
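
/* For example, with A == 8, (X & -8) / 8 has the three low bits already
   cleared by the mask, so it is exactly X >> 3.  */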

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))
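
/* For example, X / 5.0 becomes X * 0.2 only under -freciprocal-math since
   0.2 is not exactly representable, whereas X / 2.0 can always become
   X * 0.5: the reciprocal of a power of two is exact, which is what
   exact_inverse verifies.  */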

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
                             TYPE_SIGN (type)))
   { build_zero_cst (type); }))
 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
    modulo and comparison, since it is simpler and equivalent.  */
 (for cmp (eq ne)
  (simplify
   (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
   (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
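
/* For example, for signed int x, (x % 4) == 0 becomes
   ((unsigned) x % 4u) == 0u; the two tests agree for negative x as well,
   and the unsigned form can later be reduced to a mask test.  */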

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (wi::to_wide (@1))
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
         Y might be -1, because we would then change valid
         INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
          || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
                                                   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
        || tree_expr_nonnegative_p (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@3))
       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
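
/* For example, for unsigned X, X % 16 becomes X & 15, and
   A % (4 << N) becomes A & ((4 << N) - 1).  */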

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
                (type, wi::mask (TYPE_PRECISION (type)
                                 - wi::exact_log2 (wi::to_wide (@1)),
                                 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))
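
/* The overflow requirement matters: with 32-bit unsigned (wrapping) t,
   t = 0x80000000 gives (t * 2) / 2 == 0 / 2 == 0 rather than t, so this
   is only valid when overflow is undefined.  */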

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* Convert absu(x)*absu(x) -> x*x.  */
(simplify
 (mult (absu@1 @0) @1)
 (mult (convert@2 @0) @2))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
 (if (! FLOAT_TYPE_P (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x).  */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
      && single_use (@1))
  (if (TYPE_UNSIGNED (type))
   (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))
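
/* For example, for a 32-bit type this rewrites 1 << (31 - x) as
   (1 << 31) >> x; the unsigned temporary in the signed case avoids
   left-shifting 1 into the sign bit.  */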

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
 (if (flag_associative_math
      && single_use (@3))
  (with
   { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
   (if (tem)
    (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_and @0 (convert @1)) @2))))

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
 (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
 (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
  (bit_xor @0 @1))
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* (a & ~b) | (a ^ b) --> a ^ b */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a --> ~(a & b) */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (a | b) & ~(a ^ b) --> a & b */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b) --> a | ~b */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b) --> a | b */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b) --> ~(a ^ b) */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b) --> a | ~b */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* ~(~a | b) --> a & ~b */
(simplify
 (bit_not (bit_ior:cs (bit_not @0) @1))
 (bit_and @0 (bit_not @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
#endif
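
/* For example, if the nonzero bits of X are known to lie within 0x0f,
   then ~X & 0x0f equals X ^ 0x0f: complementing and masking simply flips
   the low four bits.  */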

/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
   ((A & N) + B) & M -> (A + B) & M
   Similarly if (N & M) == 0,
   ((A | N) + B) & M -> (A + B) & M
   and for - instead of + (or unary - instead of +)
   and/or ^ instead of |.
   If B is constant and (B & M) == 0, fold into A & M.  */
(for op (plus minus)
 (for bitop (bit_and bit_ior bit_xor)
  (simplify
   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
                                      @3, @4, @1, ERROR_MARK, NULL_TREE,
                                      NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2))))))
  (simplify
   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, @1, bitop, @3,
                                      @4, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2)))))))
 (simplify
  (bit_and (op:s @0 @1) INTEGER_CST@2)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, @1, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (op (convert:utype { pmop[0]; })
                          (convert:utype { pmop[1]; }))
                      (convert:utype @2)))))))
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
                                     bitop, @2, @3, NULL_TREE, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (negate (convert:utype { pmop[0]; }))
                      (convert:utype @1)))))))
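
/* A typical instance: with M == 0xff and N == 0x1ff (so N & M == M),
   ((A & 0x1ff) + B) & 0xff folds to (A + B) & 0xff, since bits of A above
   the mask cannot affect the masked low bits of the sum.  */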

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (~x & y) | ~(x | y) -> ~x */
(simplify
 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
 @2)

/* (x | y) ^ (x | ~y) -> ~x */
(simplify
 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
 (bit_not @0))

/* (x & y) | ~(x | y) -> ~(x ^ y) */
(simplify
 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x ^ y) -> x | ~y */
(simplify
 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
 (bit_ior @0 (bit_not @1)))

/* (x ^ y) | ~(x | y) -> ~(x & y) */
(simplify
 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_and @0 @1)))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))
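
/* These additive variants follow from the identity
   x + y == (x & y) + (x | y): the AND collects the bits common to both
   operands and the IOR the rest.  The same identity underlies the
   subtractive forms below.  */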

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* (~x | y) & (x | ~y) -> ~(x ^ y) */
(simplify
 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x | ~y) -> x ^ y */
(simplify
 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
 (bit_xor @0 @1))

/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
   (bit_ior (convert @4) (convert @5)))))

/* (X | Y) ^ X -> Y & ~X.  */
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:cs @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))

/* Convert abs (abs (X)) into abs (X).
   also absu (absu (X)) into absu (X).  */
(simplify
 (abs (abs@1 @0))
 @1)

(simplify
 (absu (convert@2 (absu@1 @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
  @1))

/* Convert abs[u] (-X) -> abs[u] (X).  */
(simplify
 (abs (negate @0))
 (abs @0))

(simplify
 (absu (negate @0))
 (absu @0))

/* Convert abs[u] (X) where X is nonnegative -> (X).  */
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

(simplify
 (absu tree_expr_nonnegative_p@0)
 (convert @0))

/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
 (minus @0 @1)
 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))))

/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* -(A - B) -> B - A.  */
(simplify
 (negate (minus @0 @1))
 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))
  (minus @1 @0)))
(simplify
 (negate (pointer_diff @0 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (type))
  (pointer_diff @1 @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
  (plus @0 (negate @1))))

/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constants (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || !type_has_mode_precision_p (type)))
   (convert (bitop @0 (convert @1))))))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (if (!CONSTANT_CLASS_P (@0))
   /* This is the canonical form regardless of whether (bitop @1 @2) can be
      folded to a constant.  */
   (bitop @0 (bitop @1 @2))
   /* In this case we have three constants and (bitop @0 @1) doesn't fold
      to a constant.  This can happen if @0 or @1 is a POLY_INT_CST and if
      the values involved are such that the operation can't be decided at
      compile time.  Try folding one of @0 or @1 with @2 to see whether
      that combination can be decided at compile time.

      Keep the existing form if both folds fail, to avoid endless
      oscillation.  */
   (with { tree cst1 = const_binop (bitop, type, @0, @2); }
    (if (cst1)
     (bitop @1 { cst1; })
     (with { tree cst2 = const_binop (bitop, type, @1, @2); }
      (if (cst2)
       (bitop @0 { cst2; }))))))))
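
/* For example, (x & 0xf0) & 0x3c is first reassociated to
   x & (0xf0 & 0x3c) and then folds to x & 0x30.  */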

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
 (truth_not @0))

(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true.  */
(for op (eq ne)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))

/* ~~x -> x */
(simplify
 (bit_not (bit_not @0))
 @0)

/* Convert ~ (-A) to A - 1.  */
(simplify
 (bit_not (convert? (negate @0)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))

/* Convert - (~A) to A + 1.  */
(simplify
 (negate (nop_convert (bit_not @0)))
 (plus (view_convert @0) { build_each_one_cst (type); }))

/* Convert ~ (A - 1) or ~ (A + -1) to -A.  */
(simplify
 (bit_not (convert? (minus @0 integer_each_onep)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))
(simplify
 (bit_not (convert? (plus @0 integer_all_onesp)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))

/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify.  */
(simplify
 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 (bit_not @1)))))
(simplify
 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 @1))))

/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical.  */
(simplify
 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_not (bit_xor (view_convert @0) @1))))

/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))

/* Fold A - (A & B) into ~B & A.  */
(simplify
 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (convert (bit_and (bit_not @1) @0))))

/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0  */
(for cmp (gt lt ge le)
 (simplify
  (mult (convert (cmp @0 @1)) @2)
  (cond (cmp @0 @1) @2 { build_zero_cst (type); })))

/* For integral types with undefined overflow and C != 0 fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && tree_expr_nonzero_p (@1))
   (cmp @0 @2))))

/* For integral types with wrapping overflow and C odd fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && (TREE_INT_CST_LOW (@1) & 1) != 0)
   (cmp @0 @2))))

/* For integral types with undefined overflow and C != 0 fold
   x * C RELOP y * C into:

   x RELOP y for nonnegative C
   y RELOP x for negative C  */
(for cmp (lt gt le ge)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
    (cmp @0 @2)
    (if (TREE_CODE (@1) == INTEGER_CST
         && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
     (cmp @2 @0))))))

/* (X - 1U) <= INT_MAX-1U into (int) X > 0.  */
(for cmp (le gt)
     icmp (gt le)
 (simplify
  (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_PRECISION (TREE_TYPE (@0)) > 1
       && (wi::to_wide (@2)
           == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
   (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
    (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
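
/* For 32-bit unsigned X this rewrites (X - 1U) <= 0x7ffffffeU, which holds
   exactly for 1 <= X <= 0x7fffffff, as (int) X > 0.  */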

/* X / 4 < Y / 4 iff X < Y when the division is known to be exact.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
  (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
   (cmp @0 @1))))

/* X / C1 op C2 into a simple range test.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && integer_nonzerop (@1)
       && !TREE_OVERFLOW (@1)
       && !TREE_OVERFLOW (@2))
   (with { tree lo, hi; bool neg_overflow;
           enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
                                                   &neg_overflow); }
    (switch
     (if (code == LT_EXPR || code == GE_EXPR)
      (if (TREE_OVERFLOW (lo))
       { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
       (if (code == LT_EXPR)
        (lt @0 { lo; })
        (ge @0 { lo; }))))
     (if (code == LE_EXPR || code == GT_EXPR)
      (if (TREE_OVERFLOW (hi))
       { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
       (if (code == LE_EXPR)
        (le @0 { hi; })
        (gt @0 { hi; }))))
     (if (!lo && !hi)
      { build_int_cst (type, code == NE_EXPR); })
     (if (code == EQ_EXPR && !hi)
      (ge @0 { lo; }))
     (if (code == EQ_EXPR && !lo)
      (le @0 { hi; }))
     (if (code == NE_EXPR && !hi)
      (lt @0 { lo; }))
     (if (code == NE_EXPR && !lo)
      (gt @0 { hi; }))
     (if (GENERIC)
      { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
                           lo, hi); })
     (with
      {
        tree etype = range_check_type (TREE_TYPE (@0));
        if (etype)
          {
            if (! TYPE_UNSIGNED (etype))
              etype = unsigned_type_for (etype);
            hi = fold_convert (etype, hi);
            lo = fold_convert (etype, lo);
            hi = const_binop (MINUS_EXPR, etype, hi, lo);
          }
      }
      (if (etype && hi && !TREE_OVERFLOW (hi))
       (if (code == EQ_EXPR)
        (le (minus (convert:etype @0) { lo; }) { hi; })
        (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
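
/* For example, for unsigned X, X / 10 == 3 holds exactly when
   30 <= X <= 39, so it becomes the single range test X - 30 <= 9.  */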

/* X + Z < Y + Z is the same as X < Y when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (plus:c @0 @2) (plus:c @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (plus:c @0 @2) (plus:c @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @0 @1))))

/* X - Z < Y - Z is the same as X < Y when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (minus @0 @2) (minus @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @0 @1))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (minus @0 @2) (minus @1 @2))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @0 @1))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @0 @1))))
(simplify
 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @0 @1)))

/* Z - X < Z - Y is the same as Y < X when there is no overflow.  */
(for op (lt le ge gt)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (op @1 @0))))
/* For equality and subtraction, this is also true with wrapping overflow.  */
(for op (eq ne minus)
 (simplify
  (op (minus @2 @0) (minus @2 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
   (op @1 @0))))
/* And for pointers...  */
(for op (simple_comparison)
 (simplify
  (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
   (op @1 @0))))
(simplify
 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
  (pointer_diff @1 @0)))

/* X + Y < Y is the same as X < 0 when there is no overflow.  */
(for op (lt le gt ge)
 (simplify
  (op:c (plus:c@2 @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@0) || single_use (@2)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
/* For equality, this is also true with wrapping overflow.  */
(for op (eq ne)
 (simplify
  (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
       && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
   (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
 (simplify
  (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
  (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
       && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))

/* X - Y < X is the same as Y > 0 when there is no overflow.
   For equality, this is also true with wrapping overflow.  */
(for op (simple_comparison)
 (simplify
  (op:c @0 (minus@2 @0 @1))
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
           || ((op == EQ_EXPR || op == NE_EXPR)
               && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
       && (CONSTANT_CLASS_P (@1) || single_use (@2)))
   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))

/* Transform:
   (X / Y) == 0 -> X < Y if X, Y are unsigned.
   (X / Y) != 0 -> X >= Y, if X, Y are unsigned.  */
(for cmp (eq ne)
     ocmp (lt ge)
 (simplify
  (cmp (trunc_div @0 @1) integer_zerop)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       /* Complex ==/!= is allowed, but not </>=.  */
       && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
       && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
   (ocmp @0 @1))))

/* X == C - X can never be true if C is odd.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
  (if (TREE_INT_CST_LOW (@1) & 1)
   { constant_boolean_node (cmp == NE_EXPR, type); })))
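
/* The reasoning: X == C - X would require X + X == C, and 2 * X is even
   even with wrapping arithmetic, so it can never equal an odd C.  */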
1645
1646 /* Arguments on which one can call get_nonzero_bits to get the bits
1647 possibly set. */
1648 (match with_possible_nonzero_bits
1649 INTEGER_CST@0)
1650 (match with_possible_nonzero_bits
1651 SSA_NAME@0
1652 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1653 /* Slightly extended version, do not make it recursive to keep it cheap. */
1654 (match (with_possible_nonzero_bits2 @0)
1655 with_possible_nonzero_bits@0)
1656 (match (with_possible_nonzero_bits2 @0)
1657 (bit_and:c with_possible_nonzero_bits@0 @2))
1658
1659 /* Same for bits that are known to be set, but we do not have
1660 an equivalent to get_nonzero_bits yet. */
1661 (match (with_certain_nonzero_bits2 @0)
1662 INTEGER_CST@0)
1663 (match (with_certain_nonzero_bits2 @0)
1664 (bit_ior @1 INTEGER_CST@0))
1665
1666 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1667 (for cmp (eq ne)
1668 (simplify
1669 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1670 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
1671 { constant_boolean_node (cmp == NE_EXPR, type); })))
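/* Illustrative sketch: x & 4 can only evaluate to 0 or 4, so
   (x & 4) == 6 folds to false: bit 1 of the constant 6 is certainly
   set but can never be set in x & 4.  */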
1672
1673 /* ((X inner_op C0) outer_op C1)
1674 With X being a tree where value_range has reasoned certain bits to always be
1675 zero throughout its computed value range,
1676 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op,
1677 where zero_mask has 1's for all bits that are sure to be 0 in X
1678 and 0's otherwise.
1679 if (inner_op == '^') C0 &= ~C1;
1680 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
1681 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
1682 */
1683 (for inner_op (bit_ior bit_xor)
1684 outer_op (bit_xor bit_ior)
1685 (simplify
1686 (outer_op
1687 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1688 (with
1689 {
1690 bool fail = false;
1691 wide_int zero_mask_not;
1692 wide_int C0;
1693 wide_int cst_emit;
1694
1695 if (TREE_CODE (@2) == SSA_NAME)
1696 zero_mask_not = get_nonzero_bits (@2);
1697 else
1698 fail = true;
1699
1700 if (inner_op == BIT_XOR_EXPR)
1701 {
1702 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1703 cst_emit = C0 | wi::to_wide (@1);
1704 }
1705 else
1706 {
1707 C0 = wi::to_wide (@0);
1708 cst_emit = C0 ^ wi::to_wide (@1);
1709 }
1710 }
1711 (if (!fail && (C0 & zero_mask_not) == 0)
1712 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1713 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
1714 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
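/* Illustrative sketch (assuming value_range proved the low two bits of
   x are zero, e.g. x = 4 * k): then (x | 1) ^ 3 -> x ^ (1 ^ 3) == x ^ 2,
   since C0 == 1 lies entirely within the known-zero bits; check with
   x = 4: (4 | 1) ^ 3 == 5 ^ 3 == 6 == 4 ^ 2.  */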
1715
1716 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1717 (simplify
1718 (pointer_plus (pointer_plus:s @0 @1) @3)
1719 (pointer_plus @0 (plus @1 @3)))
1720
1721 /* Pattern match
1722 tem1 = (long) ptr1;
1723 tem2 = (long) ptr2;
1724 tem3 = tem2 - tem1;
1725 tem4 = (unsigned long) tem3;
1726 tem5 = ptr1 + tem4;
1727 and produce
1728 tem5 = ptr2; */
1729 (simplify
1730 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1731 /* Conditionally look through a sign-changing conversion. */
1732 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1733 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1734 || (GENERIC && type == TREE_TYPE (@1))))
1735 @1))
1736 (simplify
1737 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
1738 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
1739 (convert @1)))
1740
1741 /* Pattern match
1742 tem = (sizetype) ptr;
1743 tem = tem & algn;
1744 tem = -tem;
1745 ... = ptr p+ tem;
1746 and produce the simpler, easier-to-analyze (with respect to alignment)
1747 ... = ptr & ~algn; */
1748 (simplify
1749 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1750 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
1751 (bit_and @0 { algn; })))
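/* Illustrative sketch: for a power-of-two mask this is the usual
   align-down idiom, e.g. with algn == 15,
     ptr p+ -((sizetype) ptr & 15) -> ptr & ~15,
   rounding ptr down to a 16-byte boundary.  */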
1752
1753 /* Try folding difference of addresses. */
1754 (simplify
1755 (minus (convert ADDR_EXPR@0) (convert @1))
1756 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1757 (with { poly_int64 diff; }
1758 (if (ptr_difference_const (@0, @1, &diff))
1759 { build_int_cst_type (type, diff); }))))
1760 (simplify
1761 (minus (convert @0) (convert ADDR_EXPR@1))
1762 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1763 (with { poly_int64 diff; }
1764 (if (ptr_difference_const (@0, @1, &diff))
1765 { build_int_cst_type (type, diff); }))))
1766 (simplify
1767 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1768 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1769 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1770 (with { poly_int64 diff; }
1771 (if (ptr_difference_const (@0, @1, &diff))
1772 { build_int_cst_type (type, diff); }))))
1773 (simplify
1774 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1775 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1776 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1777 (with { poly_int64 diff; }
1778 (if (ptr_difference_const (@0, @1, &diff))
1779 { build_int_cst_type (type, diff); }))))
1780
1781 /* If arg0 is derived from the address of an object or function, we may
1782 be able to fold this expression using the object or function's
1783 alignment. */
1784 (simplify
1785 (bit_and (convert? @0) INTEGER_CST@1)
1786 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1787 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1788 (with
1789 {
1790 unsigned int align;
1791 unsigned HOST_WIDE_INT bitpos;
1792 get_pointer_alignment_1 (@0, &align, &bitpos);
1793 }
1794 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1795 { wide_int_to_tree (type, (wi::to_wide (@1)
1796 & (bitpos / BITS_PER_UNIT))); }))))
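/* Illustrative sketch: if get_pointer_alignment_1 proves the pointer is
   8-byte aligned with bitpos 0, then (uintptr_t) p & 7 folds to 0; in
   general the masked low bits are replaced by the known bitpos residue.  */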
1797
1798
1799 /* We can't reassociate at all for saturating types. */
1800 (if (!TYPE_SATURATING (type))
1801
1802 /* Contract negates. */
1803 /* A + (-B) -> A - B */
1804 (simplify
1805 (plus:c @0 (convert? (negate @1)))
1806 /* Apply STRIP_NOPS on the negate. */
1807 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1808 && !TYPE_OVERFLOW_SANITIZED (type))
1809 (with
1810 {
1811 tree t1 = type;
1812 if (INTEGRAL_TYPE_P (type)
1813 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1814 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1815 }
1816 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
1817 /* A - (-B) -> A + B */
1818 (simplify
1819 (minus @0 (convert? (negate @1)))
1820 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1821 && !TYPE_OVERFLOW_SANITIZED (type))
1822 (with
1823 {
1824 tree t1 = type;
1825 if (INTEGRAL_TYPE_P (type)
1826 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1827 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1828 }
1829 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
1830 /* -(T)(-A) -> (T)A
1831 Sign-extension is ok except for INT_MIN, which thankfully cannot
1832 happen without overflow. */
1833 (simplify
1834 (negate (convert (negate @1)))
1835 (if (INTEGRAL_TYPE_P (type)
1836 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
1837 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
1838 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1839 && !TYPE_OVERFLOW_SANITIZED (type)
1840 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1841 (convert @1)))
1842 (simplify
1843 (negate (convert negate_expr_p@1))
1844 (if (SCALAR_FLOAT_TYPE_P (type)
1845 && ((DECIMAL_FLOAT_TYPE_P (type)
1846 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
1847 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
1848 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
1849 (convert (negate @1))))
1850 (simplify
1851 (negate (nop_convert (negate @1)))
1852 (if (!TYPE_OVERFLOW_SANITIZED (type)
1853 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1854 (view_convert @1)))
1855
1856 /* We can't reassociate floating-point unless -fassociative-math
1857 or fixed-point plus or minus because of saturation to +-Inf. */
1858 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1859 && !FIXED_POINT_TYPE_P (type))
1860
1861 /* Match patterns that allow contracting a plus-minus pair
1862 irrespective of overflow issues. */
1863 /* (A +- B) - A -> +- B */
1864 /* (A +- B) -+ B -> A */
1865 /* A - (A +- B) -> -+ B */
1866 /* A +- (B -+ A) -> +- B */
1867 (simplify
1868 (minus (plus:c @0 @1) @0)
1869 @1)
1870 (simplify
1871 (minus (minus @0 @1) @0)
1872 (negate @1))
1873 (simplify
1874 (plus:c (minus @0 @1) @1)
1875 @0)
1876 (simplify
1877 (minus @0 (plus:c @0 @1))
1878 (negate @1))
1879 (simplify
1880 (minus @0 (minus @0 @1))
1881 @1)
1882 /* (A +- B) + (C - A) -> C +- B */
1883 /* (A + B) - (A - C) -> B + C */
1884 /* More cases are handled with comparisons. */
1885 (simplify
1886 (plus:c (plus:c @0 @1) (minus @2 @0))
1887 (plus @2 @1))
1888 (simplify
1889 (plus:c (minus @0 @1) (minus @2 @0))
1890 (minus @2 @1))
1891 (simplify
1892 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
1893 (if (TYPE_OVERFLOW_UNDEFINED (type)
1894 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
1895 (pointer_diff @2 @1)))
1896 (simplify
1897 (minus (plus:c @0 @1) (minus @0 @2))
1898 (plus @1 @2))
1899
1900 /* (A +- CST1) +- CST2 -> A + CST3
1901 Use view_convert because it is safe for vectors and equivalent for
1902 scalars. */
1903 (for outer_op (plus minus)
1904 (for inner_op (plus minus)
1905 neg_inner_op (minus plus)
1906 (simplify
1907 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1908 CONSTANT_CLASS_P@2)
1909 /* If one of the types wraps, use that one. */
1910 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
1911 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
1912 forever if something doesn't simplify into a constant. */
1913 (if (!CONSTANT_CLASS_P (@0))
1914 (if (outer_op == PLUS_EXPR)
1915 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1916 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
1917 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1918 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1919 (if (outer_op == PLUS_EXPR)
1920 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1921 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1922 /* If the constant operation overflows we cannot do the transform
1923 directly as we would introduce undefined overflow, for example
1924 with (a - 1) + INT_MIN. */
1925 (if (types_match (type, @0))
1926 (with { tree cst = const_binop (outer_op == inner_op
1927 ? PLUS_EXPR : MINUS_EXPR,
1928 type, @1, @2); }
1929 (if (cst && !TREE_OVERFLOW (cst))
1930 (inner_op @0 { cst; } )
1931 /* X+INT_MAX+1 is X-INT_MIN. */
1932 (if (INTEGRAL_TYPE_P (type) && cst
1933 && wi::to_wide (cst) == wi::min_value (type))
1934 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
1935 /* Last resort, use some unsigned type. */
1936 (with { tree utype = unsigned_type_for (type); }
1937 (if (utype)
1938 (view_convert (inner_op
1939 (view_convert:utype @0)
1940 (view_convert:utype
1941 { drop_tree_overflow (cst); }))))))))))))))
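/* Illustrative sketch: folding (x + 1) + INT_MAX for signed int cannot
   produce x + (1 + INT_MAX) directly, as that constant overflows; per
   the cases above it is handled via X+INT_MAX+1 -> X-INT_MIN, or as a
   last resort the addition is carried out in the corresponding unsigned
   type and view_converted back, where wrapping is well defined.  */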
1942
1943 /* (CST1 - A) +- CST2 -> CST3 - A */
1944 (for outer_op (plus minus)
1945 (simplify
1946 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
1947 (with { tree cst = const_binop (outer_op, type, @1, @2); }
1948 (if (cst && !TREE_OVERFLOW (cst))
1949 (minus { cst; } @0)))))
1950
1951 /* CST1 - (CST2 - A) -> CST3 + A */
1952 (simplify
1953 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1954 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1955 (if (cst && !TREE_OVERFLOW (cst))
1956 (plus { cst; } @0))))
1957
1958 /* ~A + A -> -1 */
1959 (simplify
1960 (plus:c (bit_not @0) @0)
1961 (if (!TYPE_OVERFLOW_TRAPS (type))
1962 { build_all_ones_cst (type); }))
1963
1964 /* ~A + 1 -> -A */
1965 (simplify
1966 (plus (convert? (bit_not @0)) integer_each_onep)
1967 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1968 (negate (convert @0))))
1969
1970 /* -A - 1 -> ~A */
1971 (simplify
1972 (minus (convert? (negate @0)) integer_each_onep)
1973 (if (!TYPE_OVERFLOW_TRAPS (type)
1974 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1975 (bit_not (convert @0))))
1976
1977 /* -1 - A -> ~A */
1978 (simplify
1979 (minus integer_all_onesp @0)
1980 (bit_not @0))
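/* Illustrative sketch: the patterns above are the two's-complement
   identities that follow from ~a == -a - 1, e.g. for int a:
     ~a + a == -1, ~a + 1 == -a, -a - 1 == ~a, -1 - a == ~a.  */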
1981
1982 /* (T)(P + A) - (T)P -> (T) A */
1983 (simplify
1984 (minus (convert (plus:c @@0 @1))
1985 (convert? @0))
1986 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1987 /* For integer types, if A has a smaller type
1988 than T the result depends on the possible
1989 overflow in P + A.
1990 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1991 However, if an overflow in P + A would cause
1992 undefined behavior, we can assume that there
1993 is no overflow. */
1994 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1995 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1996 (convert @1)))
1997 (simplify
1998 (minus (convert (pointer_plus @@0 @1))
1999 (convert @0))
2000 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2001 /* For pointer types, if the conversion of A to the
2002 final type requires a sign- or zero-extension,
2003 then we have to punt - it is not defined which
2004 one is correct. */
2005 || (POINTER_TYPE_P (TREE_TYPE (@0))
2006 && TREE_CODE (@1) == INTEGER_CST
2007 && tree_int_cst_sign_bit (@1) == 0))
2008 (convert @1)))
2009 (simplify
2010 (pointer_diff (pointer_plus @@0 @1) @0)
2011 /* The second argument of pointer_plus must be interpreted as signed, and
2012 thus sign-extended if necessary. */
2013 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2014 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2015 second arg is unsigned even when we need to consider it as signed;
2016 we don't want to diagnose overflow here. */
2017 (convert (view_convert:stype @1))))
2018
2019 /* (T)P - (T)(P + A) -> -(T) A */
2020 (simplify
2021 (minus (convert? @0)
2022 (convert (plus:c @@0 @1)))
2023 (if (INTEGRAL_TYPE_P (type)
2024 && TYPE_OVERFLOW_UNDEFINED (type)
2025 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2026 (with { tree utype = unsigned_type_for (type); }
2027 (convert (negate (convert:utype @1))))
2028 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2029 /* For integer types, if A has a smaller type
2030 than T the result depends on the possible
2031 overflow in P + A.
2032 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2033 However, if an overflow in P + A would cause
2034 undefined behavior, we can assume that there
2035 is no overflow. */
2036 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2037 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2038 (negate (convert @1)))))
2039 (simplify
2040 (minus (convert @0)
2041 (convert (pointer_plus @@0 @1)))
2042 (if (INTEGRAL_TYPE_P (type)
2043 && TYPE_OVERFLOW_UNDEFINED (type)
2044 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2045 (with { tree utype = unsigned_type_for (type); }
2046 (convert (negate (convert:utype @1))))
2047 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2048 /* For pointer types, if the conversion of A to the
2049 final type requires a sign- or zero-extension,
2050 then we have to punt - it is not defined which
2051 one is correct. */
2052 || (POINTER_TYPE_P (TREE_TYPE (@0))
2053 && TREE_CODE (@1) == INTEGER_CST
2054 && tree_int_cst_sign_bit (@1) == 0))
2055 (negate (convert @1)))))
2056 (simplify
2057 (pointer_diff @0 (pointer_plus @@0 @1))
2058 /* The second argument of pointer_plus must be interpreted as signed, and
2059 thus sign-extended if necessary. */
2060 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2061 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2062 second arg is unsigned even when we need to consider it as signed;
2063 we don't want to diagnose overflow here. */
2064 (negate (convert (view_convert:stype @1)))))
2065
2066 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
2067 (simplify
2068 (minus (convert (plus:c @@0 @1))
2069 (convert (plus:c @0 @2)))
2070 (if (INTEGRAL_TYPE_P (type)
2071 && TYPE_OVERFLOW_UNDEFINED (type)
2072 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2073 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
2074 (with { tree utype = unsigned_type_for (type); }
2075 (convert (minus (convert:utype @1) (convert:utype @2))))
2076 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2077 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2078 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2079 /* For integer types, if A has a smaller type
2080 than T the result depends on the possible
2081 overflow in P + A.
2082 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2083 However, if an overflow in P + A would cause
2084 undefined behavior, we can assume that there
2085 is no overflow. */
2086 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2087 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2088 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2089 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
2090 (minus (convert @1) (convert @2)))))
2091 (simplify
2092 (minus (convert (pointer_plus @@0 @1))
2093 (convert (pointer_plus @0 @2)))
2094 (if (INTEGRAL_TYPE_P (type)
2095 && TYPE_OVERFLOW_UNDEFINED (type)
2096 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2097 (with { tree utype = unsigned_type_for (type); }
2098 (convert (minus (convert:utype @1) (convert:utype @2))))
2099 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2100 /* For pointer types, if the conversion of A to the
2101 final type requires a sign- or zero-extension,
2102 then we have to punt - it is not defined which
2103 one is correct. */
2104 || (POINTER_TYPE_P (TREE_TYPE (@0))
2105 && TREE_CODE (@1) == INTEGER_CST
2106 && tree_int_cst_sign_bit (@1) == 0
2107 && TREE_CODE (@2) == INTEGER_CST
2108 && tree_int_cst_sign_bit (@2) == 0))
2109 (minus (convert @1) (convert @2)))))
2110 (simplify
2111 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2112 /* The second argument of pointer_plus must be interpreted as signed, and
2113 thus sign-extended if necessary. */
2114 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2115 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2116 second arg is unsigned even when we need to consider it as signed;
2117 we don't want to diagnose overflow here. */
2118 (minus (convert (view_convert:stype @1))
2119 (convert (view_convert:stype @2)))))))
2120
2121 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2122 Modeled after fold_plusminus_mult_expr. */
2123 (if (!TYPE_SATURATING (type)
2124 && (!FLOAT_TYPE_P (type) || flag_associative_math))
2125 (for plusminus (plus minus)
2126 (simplify
2127 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2128 (if ((!ANY_INTEGRAL_TYPE_P (type)
2129 || TYPE_OVERFLOW_WRAPS (type)
2130 || (INTEGRAL_TYPE_P (type)
2131 && tree_expr_nonzero_p (@0)
2132 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2133 /* If @1 +- @2 is constant require a hard single-use on either
2134 original operand (but not on both). */
2135 && (single_use (@3) || single_use (@4)))
2136 (mult (plusminus @1 @2) @0)))
2137 /* We cannot generate constant 1 for fract. */
2138 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2139 (simplify
2140 (plusminus @0 (mult:c@3 @0 @2))
2141 (if ((!ANY_INTEGRAL_TYPE_P (type)
2142 || TYPE_OVERFLOW_WRAPS (type)
2143 || (INTEGRAL_TYPE_P (type)
2144 && tree_expr_nonzero_p (@0)
2145 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2146 && single_use (@3))
2147 (mult (plusminus { build_one_cst (type); } @2) @0)))
2148 (simplify
2149 (plusminus (mult:c@3 @0 @2) @0)
2150 (if ((!ANY_INTEGRAL_TYPE_P (type)
2151 || TYPE_OVERFLOW_WRAPS (type)
2152 || (INTEGRAL_TYPE_P (type)
2153 && tree_expr_nonzero_p (@0)
2154 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2155 && single_use (@3))
2156 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
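/* Illustrative sketch (assuming the overflow conditions above hold,
   e.g. a wrapping type): x * 3 + x * 5 -> (3 + 5) * x == x * 8, and
   x + x * 3 -> (1 + 3) * x == x * 4.  */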
2157
2158 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
2159
2160 (for minmax (min max FMIN_ALL FMAX_ALL)
2161 (simplify
2162 (minmax @0 @0)
2163 @0))
2164 /* min(max(x,y),y) -> y. */
2165 (simplify
2166 (min:c (max:c @0 @1) @1)
2167 @1)
2168 /* max(min(x,y),y) -> y. */
2169 (simplify
2170 (max:c (min:c @0 @1) @1)
2171 @1)
2172 /* max(a,-a) -> abs(a). */
2173 (simplify
2174 (max:c @0 (negate @0))
2175 (if (TREE_CODE (type) != COMPLEX_TYPE
2176 && (! ANY_INTEGRAL_TYPE_P (type)
2177 || TYPE_OVERFLOW_UNDEFINED (type)))
2178 (abs @0)))
2179 /* min(a,-a) -> -abs(a). */
2180 (simplify
2181 (min:c @0 (negate @0))
2182 (if (TREE_CODE (type) != COMPLEX_TYPE
2183 && (! ANY_INTEGRAL_TYPE_P (type)
2184 || TYPE_OVERFLOW_UNDEFINED (type)))
2185 (negate (abs @0))))
2186 (simplify
2187 (min @0 @1)
2188 (switch
2189 (if (INTEGRAL_TYPE_P (type)
2190 && TYPE_MIN_VALUE (type)
2191 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2192 @1)
2193 (if (INTEGRAL_TYPE_P (type)
2194 && TYPE_MAX_VALUE (type)
2195 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2196 @0)))
2197 (simplify
2198 (max @0 @1)
2199 (switch
2200 (if (INTEGRAL_TYPE_P (type)
2201 && TYPE_MAX_VALUE (type)
2202 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2203 @1)
2204 (if (INTEGRAL_TYPE_P (type)
2205 && TYPE_MIN_VALUE (type)
2206 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2207 @0)))
2208
2209 /* max (a, a + CST) -> a + CST where CST is positive. */
2210 /* max (a, a + CST) -> a where CST is negative. */
2211 (simplify
2212 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2213 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2214 (if (tree_int_cst_sgn (@1) > 0)
2215 @2
2216 @0)))
2217
2218 /* min (a, a + CST) -> a where CST is positive. */
2219 /* min (a, a + CST) -> a + CST where CST is negative. */
2220 (simplify
2221 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2222 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2223 (if (tree_int_cst_sgn (@1) > 0)
2224 @0
2225 @2)))
2226
2227 /* (convert (minmax (convert x) c)) -> (minmax x c) if x is promoted
2228 and the outer convert demotes the expression back to x's type. */
2229 (for minmax (min max)
2230 (simplify
2231 (convert (minmax@0 (convert @1) INTEGER_CST@2))
2232 (if (INTEGRAL_TYPE_P (type)
2233 && types_match (@1, type) && int_fits_type_p (@2, type)
2234 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2235 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2236 (minmax @1 (convert @2)))))
2237
2238 (for minmax (FMIN_ALL FMAX_ALL)
2239 /* If either argument is NaN, return the other one. Avoid the
2240 transformation if we get (and honor) a signalling NaN. */
2241 (simplify
2242 (minmax:c @0 REAL_CST@1)
2243 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2244 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2245 @0)))
2246 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2247 functions to return the numeric arg if the other one is NaN.
2248 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2249 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2250 worry about it either. */
2251 (if (flag_finite_math_only)
2252 (simplify
2253 (FMIN_ALL @0 @1)
2254 (min @0 @1))
2255 (simplify
2256 (FMAX_ALL @0 @1)
2257 (max @0 @1)))
2258 /* min (-A, -B) -> -max (A, B) */
2259 (for minmax (min max FMIN_ALL FMAX_ALL)
2260 maxmin (max min FMAX_ALL FMIN_ALL)
2261 (simplify
2262 (minmax (negate:s@2 @0) (negate:s@3 @1))
2263 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2264 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2265 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2266 (negate (maxmin @0 @1)))))
2267 /* MIN (~X, ~Y) -> ~MAX (X, Y)
2268 MAX (~X, ~Y) -> ~MIN (X, Y) */
2269 (for minmax (min max)
2270 maxmin (max min)
2271 (simplify
2272 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2273 (bit_not (maxmin @0 @1))))
2274
2275 /* MIN (X, Y) == X -> X <= Y */
2276 (for minmax (min min max max)
2277 cmp (eq ne eq ne )
2278 out (le gt ge lt )
2279 (simplify
2280 (cmp:c (minmax:c @0 @1) @0)
2281 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2282 (out @0 @1))))
2283 /* MIN (X, 5) == 0 -> X == 0
2284 MIN (X, 5) == 7 -> false */
2285 (for cmp (eq ne)
2286 (simplify
2287 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2288 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2289 TYPE_SIGN (TREE_TYPE (@0))))
2290 { constant_boolean_node (cmp == NE_EXPR, type); }
2291 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2292 TYPE_SIGN (TREE_TYPE (@0))))
2293 (cmp @0 @2)))))
2294 (for cmp (eq ne)
2295 (simplify
2296 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2297 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2298 TYPE_SIGN (TREE_TYPE (@0))))
2299 { constant_boolean_node (cmp == NE_EXPR, type); }
2300 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2301 TYPE_SIGN (TREE_TYPE (@0))))
2302 (cmp @0 @2)))))
2303 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2304 (for minmax (min min max max min min max max )
2305 cmp (lt le gt ge gt ge lt le )
2306 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2307 (simplify
2308 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2309 (comb (cmp @0 @2) (cmp @1 @2))))
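/* Illustrative sketch: MIN (x, 5) < 7 -> (x < 7) | (5 < 7), which
   constant-folds to true, while MIN (x, 5) < 3 -> (x < 3) | (5 < 3),
   i.e. just x < 3.  */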
2310
2311 /* Simplifications of shift and rotates. */
2312
2313 (for rotate (lrotate rrotate)
2314 (simplify
2315 (rotate integer_all_onesp@0 @1)
2316 @0))
2317
2318 /* Optimize -1 >> x for arithmetic right shifts. */
2319 (simplify
2320 (rshift integer_all_onesp@0 @1)
2321 (if (!TYPE_UNSIGNED (type)
2322 && tree_expr_nonnegative_p (@1))
2323 @0))
2324
2325 /* Optimize (x >> c) << c into x & (-1<<c). */
2326 (simplify
2327 (lshift (rshift @0 INTEGER_CST@1) @1)
2328 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2329 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
2330
2331 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2332 types. */
2333 (simplify
2334 (rshift (lshift @0 INTEGER_CST@1) @1)
2335 (if (TYPE_UNSIGNED (type)
2336 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2337 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
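/* Illustrative sketch for 32-bit unsigned x:
     (x >> 4) << 4 -> x & 0xfffffff0 (i.e. x & (-1 << 4))
     (x << 4) >> 4 -> x & 0x0fffffff (i.e. x & (~0u >> 4)).  */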
2338
2339 (for shiftrotate (lrotate rrotate lshift rshift)
2340 (simplify
2341 (shiftrotate @0 integer_zerop)
2342 (non_lvalue @0))
2343 (simplify
2344 (shiftrotate integer_zerop@0 @1)
2345 @0)
2346 /* Prefer vector1 << scalar to vector1 << vector2
2347 if vector2 is uniform. */
2348 (for vec (VECTOR_CST CONSTRUCTOR)
2349 (simplify
2350 (shiftrotate @0 vec@1)
2351 (with { tree tem = uniform_vector_p (@1); }
2352 (if (tem)
2353 (shiftrotate @0 { tem; }))))))
2354
2355 /* Simplify X << Y where Y's low "width" bits are zero to just X, as
2356 the only valid Y is then 0. Similarly for X >> Y. */
2357 #if GIMPLE
2358 (for shift (lshift rshift)
2359 (simplify
2360 (shift @0 SSA_NAME@1)
2361 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2362 (with {
2363 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2364 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2365 }
2366 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2367 @0)))))
2368 #endif
2369
2370 /* Rewrite an LROTATE_EXPR by a constant into an
2371 RROTATE_EXPR by a new constant. */
2372 (simplify
2373 (lrotate @0 INTEGER_CST@1)
2374 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2375 build_int_cst (TREE_TYPE (@1),
2376 element_precision (type)), @1); }))
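/* Illustrative sketch: on a 32-bit type a left-rotate by 3 becomes a
   right-rotate by 32 - 3 == 29, canonicalizing on RROTATE_EXPR.  */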
2377
2378 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2379 (for op (lrotate rrotate rshift lshift)
2380 (simplify
2381 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2382 (with { unsigned int prec = element_precision (type); }
2383 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2384 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2385 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2386 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2387 (with { unsigned int low = (tree_to_uhwi (@1)
2388 + tree_to_uhwi (@2)); }
2389 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2390 being well defined. */
2391 (if (low >= prec)
2392 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2393 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2394 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2395 { build_zero_cst (type); }
2396 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2397 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
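/* Illustrative sketch (32-bit int): (x >> 20) >> 20 has a combined
   count of 40 >= 32; a signed right shift degenerates to x >> 31 and
   an unsigned one to 0, while rotates just reduce the combined count
   modulo the precision, e.g. rotl (rotl (x, 20), 20) == rotl (x, 8).  */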
2398
2399
2400 /* ((1 << A) & 1) != 0 -> A == 0
2401 ((1 << A) & 1) == 0 -> A != 0 */
2402 (for cmp (ne eq)
2403 icmp (eq ne)
2404 (simplify
2405 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2406 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2407
2408 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2409 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2410 if CST2 != 0. */
2411 (for cmp (ne eq)
2412 (simplify
2413 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2414 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2415 (if (cand < 0
2416 || (!integer_zerop (@2)
2417 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2418 { constant_boolean_node (cmp == NE_EXPR, type); }
2419 (if (!integer_zerop (@2)
2420 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2421 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
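/* Illustrative sketch: (4 << a) == 32 folds to a == 3 since
   ctz (32) - ctz (4) == 5 - 2 == 3 and 4 << 3 == 32, whereas
   (4 << a) == 33 folds to false because no count can match.  */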
2422
2423 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2424 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2425 if the new mask might be further optimized. */
2426 (for shift (lshift rshift)
2427 (simplify
2428 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2429 INTEGER_CST@2)
2430 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2431 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2432 && tree_fits_uhwi_p (@1)
2433 && tree_to_uhwi (@1) > 0
2434 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2435 (with
2436 {
2437 unsigned int shiftc = tree_to_uhwi (@1);
2438 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2439 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2440 tree shift_type = TREE_TYPE (@3);
2441 unsigned int prec;
2442
2443 if (shift == LSHIFT_EXPR)
2444 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2445 else if (shift == RSHIFT_EXPR
2446 && type_has_mode_precision_p (shift_type))
2447 {
2448 prec = TYPE_PRECISION (TREE_TYPE (@3));
2449 tree arg00 = @0;
2450 /* See if more bits can be proven as zero because of
2451 zero extension. */
2452 if (@3 != @0
2453 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2454 {
2455 tree inner_type = TREE_TYPE (@0);
2456 if (type_has_mode_precision_p (inner_type)
2457 && TYPE_PRECISION (inner_type) < prec)
2458 {
2459 prec = TYPE_PRECISION (inner_type);
2460 /* See if we can shorten the right shift. */
2461 if (shiftc < prec)
2462 shift_type = inner_type;
2463 /* Otherwise X >> C1 is all zeros, so we'll optimize
2464 it into (X, 0) later on by making sure zerobits
2465 is all ones. */
2466 }
2467 }
2468 zerobits = HOST_WIDE_INT_M1U;
2469 if (shiftc < prec)
2470 {
2471 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2472 zerobits <<= prec - shiftc;
2473 }
2474 /* For an arithmetic shift, if the sign bit could be set, zerobits
2475 can actually contain sign bits, so no transformation is
2476 possible, unless MASK masks them all away. In that
2477 case the shift needs to be converted into a logical shift. */
2478 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2479 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2480 {
2481 if ((mask & zerobits) == 0)
2482 shift_type = unsigned_type_for (TREE_TYPE (@3));
2483 else
2484 zerobits = 0;
2485 }
2486 }
2487 }
2488 /* ((X << 16) & 0xff00) is (X, 0). */
2489 (if ((mask & zerobits) == mask)
2490 { build_int_cst (type, 0); }
2491 (with { newmask = mask | zerobits; }
2492 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2493 (with
2494 {
2495 /* Only do the transformation if NEWMASK is some integer
2496 mode's mask. */
2497 for (prec = BITS_PER_UNIT;
2498 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
2499 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
2500 break;
2501 }
2502 (if (prec < HOST_BITS_PER_WIDE_INT
2503 || newmask == HOST_WIDE_INT_M1U)
2504 (with
2505 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2506 (if (!tree_int_cst_equal (newmaskt, @2))
2507 (if (shift_type != TREE_TYPE (@3))
2508 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2509 (bit_and @4 { newmaskt; })))))))))))))
2510
2511 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2512 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
2513 (for shift (lshift rshift)
2514 (for bit_op (bit_and bit_xor bit_ior)
2515 (simplify
2516 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2517 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2518 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2519 (bit_op (shift (convert @0) @1) { mask; }))))))
2520
2521 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2522 (simplify
2523 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2524 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2525 && (element_precision (TREE_TYPE (@0))
2526 <= element_precision (TREE_TYPE (@1))
2527 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2528 (with
2529 { tree shift_type = TREE_TYPE (@0); }
2530 (convert (rshift (convert:shift_type @1) @2)))))
2531
2532 /* ~(~X >>r Y) -> X >>r Y
2533 ~(~X <<r Y) -> X <<r Y */
2534 (for rotate (lrotate rrotate)
2535 (simplify
2536 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2537 (if ((element_precision (TREE_TYPE (@0))
2538 <= element_precision (TREE_TYPE (@1))
2539 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2540 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2541 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2542 (with
2543 { tree rotate_type = TREE_TYPE (@0); }
2544 (convert (rotate (convert:rotate_type @1) @2))))))
2545
2546 /* Simplifications of conversions. */
2547
2548 /* Basic strip-useless-type-conversions / strip_nops. */
2549 (for cvt (convert view_convert float fix_trunc)
2550 (simplify
2551 (cvt @0)
2552 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2553 || (GENERIC && type == TREE_TYPE (@0)))
2554 @0)))
2555
2556 /* Contract view-conversions. */
2557 (simplify
2558 (view_convert (view_convert @0))
2559 (view_convert @0))
2560
2561 /* For integral conversions with the same precision or pointer
2562 conversions use a NOP_EXPR instead. */
2563 (simplify
2564 (view_convert @0)
2565 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2566 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2567 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2568 (convert @0)))
2569
2570 /* Strip inner integral conversions that do not change precision or size, or
2571 zero-extend while keeping the same size (for bool-to-char). */
2572 (simplify
2573 (view_convert (convert@0 @1))
2574 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2575 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2576 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2577 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2578 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2579 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
2580 (view_convert @1)))
2581
2582 /* Re-association barriers around constants and other re-association
2583 barriers can be removed. */
2584 (simplify
2585 (paren CONSTANT_CLASS_P@0)
2586 @0)
2587 (simplify
2588 (paren (paren@1 @0))
2589 @1)
2590
2591 /* Handle cases of two conversions in a row. */
2592 (for ocvt (convert float fix_trunc)
2593 (for icvt (convert float)
2594 (simplify
2595 (ocvt (icvt@1 @0))
2596 (with
2597 {
2598 tree inside_type = TREE_TYPE (@0);
2599 tree inter_type = TREE_TYPE (@1);
2600 int inside_int = INTEGRAL_TYPE_P (inside_type);
2601 int inside_ptr = POINTER_TYPE_P (inside_type);
2602 int inside_float = FLOAT_TYPE_P (inside_type);
2603 int inside_vec = VECTOR_TYPE_P (inside_type);
2604 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2605 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2606 int inter_int = INTEGRAL_TYPE_P (inter_type);
2607 int inter_ptr = POINTER_TYPE_P (inter_type);
2608 int inter_float = FLOAT_TYPE_P (inter_type);
2609 int inter_vec = VECTOR_TYPE_P (inter_type);
2610 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2611 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2612 int final_int = INTEGRAL_TYPE_P (type);
2613 int final_ptr = POINTER_TYPE_P (type);
2614 int final_float = FLOAT_TYPE_P (type);
2615 int final_vec = VECTOR_TYPE_P (type);
2616 unsigned int final_prec = TYPE_PRECISION (type);
2617 int final_unsignedp = TYPE_UNSIGNED (type);
2618 }
2619 (switch
2620 /* In addition to the cases of two conversions in a row
2621 handled below, if we are converting something to its own
2622 type via an object of identical or wider precision, neither
2623 conversion is needed. */
2624 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2625 || (GENERIC
2626 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2627 && (((inter_int || inter_ptr) && final_int)
2628 || (inter_float && final_float))
2629 && inter_prec >= final_prec)
2630 (ocvt @0))
2631
2632 /* Likewise, if the intermediate and initial types are either both
2633 float or both integer, we don't need the middle conversion if the
2634 former is wider than the latter and doesn't change the signedness
2635 (for integers). Avoid this if the final type is a pointer since
2636 then we sometimes need the middle conversion. */
2637 (if (((inter_int && inside_int) || (inter_float && inside_float))
2638 && (final_int || final_float)
2639 && inter_prec >= inside_prec
2640 && (inter_float || inter_unsignedp == inside_unsignedp))
2641 (ocvt @0))
2642
2643 /* If we have a sign-extension of a zero-extended value, we can
2644 replace that by a single zero-extension. Likewise if the
2645 final conversion does not change precision we can drop the
2646 intermediate conversion. */
2647 (if (inside_int && inter_int && final_int
2648 && ((inside_prec < inter_prec && inter_prec < final_prec
2649 && inside_unsignedp && !inter_unsignedp)
2650 || final_prec == inter_prec))
2651 (ocvt @0))
2652
2653 /* Two conversions in a row are not needed unless:
2654 - some conversion is floating-point (overstrict for now), or
2655 - some conversion is a vector (overstrict for now), or
2656 - the intermediate type is narrower than both initial and
2657 final, or
2658 - the intermediate type and innermost type differ in signedness,
2659 and the outermost type is wider than the intermediate, or
2660 - the initial type is a pointer type and the precisions of the
2661 intermediate and final types differ, or
2662 - the final type is a pointer type and the precisions of the
2663 initial and intermediate types differ. */
2664 (if (! inside_float && ! inter_float && ! final_float
2665 && ! inside_vec && ! inter_vec && ! final_vec
2666 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2667 && ! (inside_int && inter_int
2668 && inter_unsignedp != inside_unsignedp
2669 && inter_prec < final_prec)
2670 && ((inter_unsignedp && inter_prec > inside_prec)
2671 == (final_unsignedp && final_prec > inter_prec))
2672 && ! (inside_ptr && inter_prec != final_prec)
2673 && ! (final_ptr && inside_prec != inter_prec))
2674 (ocvt @0))
2675
2676 /* A truncation to an unsigned type (a zero-extension) should be
2677 canonicalized as a bitwise AND with a mask. */
2678 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2679 && final_int && inter_int && inside_int
2680 && final_prec == inside_prec
2681 && final_prec > inter_prec
2682 && inter_unsignedp)
2683 (convert (bit_and @0 { wide_int_to_tree
2684 (inside_type,
2685 wi::mask (inter_prec, false,
2686 TYPE_PRECISION (inside_type))); })))
2687
2688 /* If we are converting an integer to a floating-point type that can
2689 represent it exactly and back to an integer, we can skip the
2690 floating-point conversion. */
2691 (if (GIMPLE /* PR66211 */
2692 && inside_int && inter_float && final_int &&
2693 (unsigned) significand_size (TYPE_MODE (inter_type))
2694 >= inside_prec - !inside_unsignedp)
2695 (convert @0)))))))
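/* Illustrative sketch of the last case: (int) (double) i == i for
   32-bit int, as double's 53-bit significand represents every int
   exactly, so both conversions can be dropped on GIMPLE.  */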
2696
2697 /* If we have a narrowing conversion to an integral type that is fed by a
2698 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2699 masks off bits outside the final type (and nothing else). */
2700 (simplify
2701 (convert (bit_and @0 INTEGER_CST@1))
2702 (if (INTEGRAL_TYPE_P (type)
2703 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2704 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2705 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2706 TYPE_PRECISION (type)), 0))
2707 (convert @0)))
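/* Illustrative sketch: (unsigned char) (x & 0xff) -> (unsigned char) x,
   since the mask only clears bits that the narrowing conversion
   discards anyway.  */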
2708
2709
2710 /* (X /[ex] A) * A -> X. */
2711 (simplify
2712 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2713 (convert @0))
2714
2715 /* ((X /[ex] A) +- B) * A --> X +- A * B. */
2716 (for op (plus minus)
2717 (simplify
2718 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
2719 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
2720 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
2721 (with
2722 {
2723 wi::overflow_type overflow;
2724 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
2725 TYPE_SIGN (type), &overflow);
2726 }
2727 (if (types_match (type, TREE_TYPE (@2))
2728 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
2729 (op @0 { wide_int_to_tree (type, mul); })
2730 (with { tree utype = unsigned_type_for (type); }
2731 (convert (op (convert:utype @0)
2732 (mult (convert:utype @1) (convert:utype @2))))))))))
2733
2734 /* Canonicalization of binary operations. */
2735
2736 /* Convert X + -C into X - C. */
2737 (simplify
2738 (plus @0 REAL_CST@1)
2739 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2740 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
2741 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2742 (minus @0 { tem; })))))
2743
2744 /* Convert x+x into x*2. */
2745 (simplify
2746 (plus @0 @0)
2747 (if (SCALAR_FLOAT_TYPE_P (type))
2748 (mult @0 { build_real (type, dconst2); })
2749 (if (INTEGRAL_TYPE_P (type))
2750 (mult @0 { build_int_cst (type, 2); }))))
2751
2752 /* 0 - X -> -X. */
2753 (simplify
2754 (minus integer_zerop @1)
2755 (negate @1))
2756 (simplify
2757 (pointer_diff integer_zerop @1)
2758 (negate (convert @1)))
2759
2760 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2761 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2762 (-ARG1 + ARG0) reduces to -ARG1. */
2763 (simplify
2764 (minus real_zerop@0 @1)
2765 (if (fold_real_zero_addition_p (type, @0, 0))
2766 (negate @1)))
2767
2768 /* Transform x * -1 into -x. */
2769 (simplify
2770 (mult @0 integer_minus_onep)
2771 (negate @0))
2772
2773 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2774 signed overflow for CST != 0 && CST != -1. */
2775 (simplify
2776 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
2777 (if (TREE_CODE (@2) != INTEGER_CST
2778 && single_use (@3)
2779 && !integer_zerop (@1) && !integer_minus_onep (@1))
2780 (mult (mult @0 @2) @1)))
2781
2782 /* True if we can easily extract the real and imaginary parts of a complex
2783 number. */
2784 (match compositional_complex
2785 (convert? (complex @0 @1)))
2786
2787 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2788 (simplify
2789 (complex (realpart @0) (imagpart @0))
2790 @0)
2791 (simplify
2792 (realpart (complex @0 @1))
2793 @0)
2794 (simplify
2795 (imagpart (complex @0 @1))
2796 @1)
2797
2798 /* Sometimes we only care about half of a complex expression. */
2799 (simplify
2800 (realpart (convert?:s (conj:s @0)))
2801 (convert (realpart @0)))
2802 (simplify
2803 (imagpart (convert?:s (conj:s @0)))
2804 (convert (negate (imagpart @0))))
2805 (for part (realpart imagpart)
2806 (for op (plus minus)
2807 (simplify
2808 (part (convert?:s@2 (op:s @0 @1)))
2809 (convert (op (part @0) (part @1))))))
2810 (simplify
2811 (realpart (convert?:s (CEXPI:s @0)))
2812 (convert (COS @0)))
2813 (simplify
2814 (imagpart (convert?:s (CEXPI:s @0)))
2815 (convert (SIN @0)))
2816
2817 /* conj(conj(x)) -> x */
2818 (simplify
2819 (conj (convert? (conj @0)))
2820 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2821 (convert @0)))
2822
2823 /* conj({x,y}) -> {x,-y} */
2824 (simplify
2825 (conj (convert?:s (complex:s @0 @1)))
2826 (with { tree itype = TREE_TYPE (type); }
2827 (complex (convert:itype @0) (negate (convert:itype @1)))))
2828
2829 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2830 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2831 (simplify
2832 (bswap (bswap @0))
2833 @0)
2834 (simplify
2835 (bswap (bit_not (bswap @0)))
2836 (bit_not @0))
2837 (for bitop (bit_xor bit_ior bit_and)
2838 (simplify
2839 (bswap (bitop:c (bswap @0) @1))
2840 (bitop @0 (bswap @1)))))
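/* Illustrative sketch: byte swaps commute with bytewise bit operations,
   e.g. __builtin_bswap32 (__builtin_bswap32 (x)) == x and
   __builtin_bswap32 (x ^ y) == __builtin_bswap32 (x) ^ __builtin_bswap32 (y),
   which is what lets the bswap be pushed onto a single operand.  */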
2841
2842
2843 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
2844
2845 /* Simplify constant conditions.
2846 Only optimize constant conditions when the selected branch
2847 has the same type as the COND_EXPR. This avoids optimizing
2848 away "c ? x : throw", where the throw has a void type.
2849 Note that we cannot throw away the fold-const.c variant nor
2850 this one, as we depend on doing this transform before
2851 A ? B : B -> B possibly triggers, and the fold-const.c one can optimize
2852 0 ? A : B to B even if A has side-effects, something
2853 genmatch cannot handle. */
2854 (simplify
2855 (cond INTEGER_CST@0 @1 @2)
2856 (if (integer_zerop (@0))
2857 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2858 @2)
2859 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2860 @1)))
2861 (simplify
2862 (vec_cond VECTOR_CST@0 @1 @2)
2863 (if (integer_all_onesp (@0))
2864 @1
2865 (if (integer_zerop (@0))
2866 @2)))
2867
2868 /* Simplification moved from fold_cond_expr_with_comparison. It may also
2869 be extended. */
2870 /* This pattern implements two kinds of simplification:
2871
2872 Case 1)
2873 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax x c) if:
2874 1) Conversions are type widening from a smaller type.
2875 2) Const c1 equals c2 after canonicalizing the comparison.
2876 3) Comparison has tree code LT, LE, GT or GE.
2877 This specific pattern is needed when (cmp (convert x) c) may not
2878 be simplified by comparison patterns because of multiple uses of
2879 x. It also makes sense here because simplifying across multiple
2880 referred variables is always beneficial in complicated cases.
2881
2882 Case 2)
2883 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2884 (for cmp (lt le gt ge eq)
2885 (simplify
2886 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2887 (with
2888 {
2889 tree from_type = TREE_TYPE (@1);
2890 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2891 enum tree_code code = ERROR_MARK;
2892
2893 if (INTEGRAL_TYPE_P (from_type)
2894 && int_fits_type_p (@2, from_type)
2895 && (types_match (c1_type, from_type)
2896 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2897 && (TYPE_UNSIGNED (from_type)
2898 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2899 && (types_match (c2_type, from_type)
2900 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2901 && (TYPE_UNSIGNED (from_type)
2902 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2903 {
2904 if (cmp != EQ_EXPR)
2905 {
2906 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2907 {
2908 /* X <= Y - 1 is equivalent to X < Y. */
2909 if (cmp == LE_EXPR)
2910 code = LT_EXPR;
2911 /* X > Y - 1 is equivalent to X >= Y. */
2912 if (cmp == GT_EXPR)
2913 code = GE_EXPR;
2914 }
2915 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2916 {
2917 /* X < Y + 1 is equivalent to X <= Y. */
2918 if (cmp == LT_EXPR)
2919 code = LE_EXPR;
2920 /* X >= Y + 1 is equivalent to X > Y. */
2921 if (cmp == GE_EXPR)
2922 code = GT_EXPR;
2923 }
2924 if (code != ERROR_MARK
2925 || wi::to_widest (@2) == wi::to_widest (@3))
2926 {
2927 if (cmp == LT_EXPR || cmp == LE_EXPR)
2928 code = MIN_EXPR;
2929 if (cmp == GT_EXPR || cmp == GE_EXPR)
2930 code = MAX_EXPR;
2931 }
2932 }
2933 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
2934 else if (int_fits_type_p (@3, from_type))
2935 code = EQ_EXPR;
2936 }
2937 }
2938 (if (code == MAX_EXPR)
2939 (convert (max @1 (convert @2)))
2940 (if (code == MIN_EXPR)
2941 (convert (min @1 (convert @2)))
2942 (if (code == EQ_EXPR)
2943 (convert (cond (eq @1 (convert @3))
2944 (convert:from_type @3) (convert:from_type @2)))))))))
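/* Illustrative sketch of Case 1 (assuming short s promoted to int):
   (int) s < 5 ? (int) s : 5 -> (int) MIN (s, 5); the constant fits
   the narrow type, so the min/max is done on s and converted once.  */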
2945
2946 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2947
2948 1) OP is PLUS or MINUS.
2949 2) CMP is LT, LE, GT or GE.
2950 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2951
2952 This pattern also handles special cases like:
2953
2954 A) Operand x is an unsigned to signed type conversion and c1 is
2955 integer zero. In this case,
2956 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2957 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2958 B) Const c1 may not be equal to (c3 op' c2). In this case we also
2959 check equality for (c1+1) and (c1-1) by adjusting the comparison
2960 code.
2961
2962 TODO: Though the signed type is handled by this pattern, it cannot be
2963 simplified at the moment because the C standard requires additional
2964 type promotion. In order to match and simplify it here, the IR needs
2965 to be cleaned up by other optimizers, i.e., VRP. */
2966 (for op (plus minus)
2967 (for cmp (lt le gt ge)
2968 (simplify
2969 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2970 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2971 (if (types_match (from_type, to_type)
2972 /* Check if it is special case A). */
2973 || (TYPE_UNSIGNED (from_type)
2974 && !TYPE_UNSIGNED (to_type)
2975 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2976 && integer_zerop (@1)
2977 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2978 (with
2979 {
2980 wi::overflow_type overflow = wi::OVF_NONE;
2981 enum tree_code code, cmp_code = cmp;
2982 wide_int real_c1;
2983 wide_int c1 = wi::to_wide (@1);
2984 wide_int c2 = wi::to_wide (@2);
2985 wide_int c3 = wi::to_wide (@3);
2986 signop sgn = TYPE_SIGN (from_type);
2987
2988 /* Handle special case A), given x of unsigned type:
2989 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2990 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2991 if (!types_match (from_type, to_type))
2992 {
2993 if (cmp_code == LT_EXPR)
2994 cmp_code = GT_EXPR;
2995 if (cmp_code == GE_EXPR)
2996 cmp_code = LE_EXPR;
2997 c1 = wi::max_value (to_type);
2998 }
2999 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
3000 compute (c3 op' c2) and check if it equals to c1 with op' being
3001 the inverted operator of op. Make sure overflow doesn't happen
3002 if it is undefined. */
3003 if (op == PLUS_EXPR)
3004 real_c1 = wi::sub (c3, c2, sgn, &overflow);
3005 else
3006 real_c1 = wi::add (c3, c2, sgn, &overflow);
3007
3008 code = cmp_code;
3009 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3010 {
3011 /* Check if c1 equals to real_c1. Boundary condition is handled
3012 by adjusting comparison operation if necessary. */
3013 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3014 && !overflow)
3015 {
3016 /* X <= Y - 1 is equivalent to X < Y. */
3017 if (cmp_code == LE_EXPR)
3018 code = LT_EXPR;
3019 /* X > Y - 1 is equivalent to X >= Y. */
3020 if (cmp_code == GT_EXPR)
3021 code = GE_EXPR;
3022 }
3023 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3024 && !overflow)
3025 {
3026 /* X < Y + 1 is equivalent to X <= Y. */
3027 if (cmp_code == LT_EXPR)
3028 code = LE_EXPR;
3029 /* X >= Y + 1 is equivalent to X > Y. */
3030 if (cmp_code == GE_EXPR)
3031 code = GT_EXPR;
3032 }
3033 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3034 {
3035 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3036 code = MIN_EXPR;
3037 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3038 code = MAX_EXPR;
3039 }
3040 }
3041 }
3042 (if (code == MAX_EXPR)
3043 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3044 { wide_int_to_tree (from_type, c2); })
3045 (if (code == MIN_EXPR)
3046 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3047 { wide_int_to_tree (from_type, c2); })))))))))
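/* Illustrative sketch: x < 3 ? x + 2 : 5 satisfies c3 == c1 op c2
   (5 == 3 + 2), so it folds to MIN (x, 3) + 2; off-by-one forms such
   as x <= 2 are caught by the (c1 +- 1) adjustment above.  */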
3048
3049 (for cnd (cond vec_cond)
3050 /* A ? B : (A ? X : C) -> A ? B : C. */
3051 (simplify
3052 (cnd @0 (cnd @0 @1 @2) @3)
3053 (cnd @0 @1 @3))
3054 (simplify
3055 (cnd @0 @1 (cnd @0 @2 @3))
3056 (cnd @0 @1 @3))
3057 /* A ? B : (!A ? C : X) -> A ? B : C. */
3058 /* ??? This matches embedded conditions open-coded because genmatch
3059 would generate matching code for conditions in separate stmts only.
3060 The following is still important to merge the then and else arm cases
3061 coming from if-conversion. */
3062 (simplify
3063 (cnd @0 @1 (cnd @2 @3 @4))
3064 (if (inverse_conditions_p (@0, @2))
3065 (cnd @0 @1 @3)))
3066 (simplify
3067 (cnd @0 (cnd @1 @2 @3) @4)
3068 (if (inverse_conditions_p (@0, @1))
3069 (cnd @0 @3 @4)))
3070
3071 /* A ? B : B -> B. */
3072 (simplify
3073 (cnd @0 @1 @1)
3074 @1)
3075
3076 /* !A ? B : C -> A ? C : B. */
3077 (simplify
3078 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3079 (cnd @0 @2 @1)))
3080
3081 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3082 return all -1 or all 0 results. */
3083 /* ??? We could instead convert all instances of the vec_cond to negate,
3084 but that isn't necessarily a win on its own. */
3085 (simplify
3086 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3087 (if (VECTOR_TYPE_P (type)
3088 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3089 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3090 && (TYPE_MODE (TREE_TYPE (type))
3091 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3092 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3093
3094 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
3095 (simplify
3096 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3097 (if (VECTOR_TYPE_P (type)
3098 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3099 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3100 && (TYPE_MODE (TREE_TYPE (type))
3101 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3102 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3103
3104
3105 /* Simplifications of comparisons. */
3106
3107 /* See if we can reduce the magnitude of a constant involved in a
3108 comparison by changing the comparison code. This is a canonicalization
3109 formerly done by maybe_canonicalize_comparison_1. */
3110 (for cmp (le gt)
3111 acmp (lt ge)
3112 (simplify
3113 (cmp @0 uniform_integer_cst_p@1)
3114 (with { tree cst = uniform_integer_cst_p (@1); }
3115 (if (tree_int_cst_sgn (cst) == -1)
3116 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3117 wide_int_to_tree (TREE_TYPE (cst),
3118 wi::to_wide (cst)
3119 + 1)); })))))
3120 (for cmp (ge lt)
3121 acmp (gt le)
3122 (simplify
3123 (cmp @0 uniform_integer_cst_p@1)
3124 (with { tree cst = uniform_integer_cst_p (@1); }
3125 (if (tree_int_cst_sgn (cst) == 1)
3126 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3127 wide_int_to_tree (TREE_TYPE (cst),
3128 wi::to_wide (cst) - 1)); })))))
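/* Illustrative sketch: x <= -1 becomes x < 0 and x >= 1 becomes x > 0,
   trading the constant for one of smaller magnitude.  */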
3129
3130 /* We can simplify a logical negation of a comparison to the
3131 inverted comparison. As we cannot compute an expression
3132 operator using invert_tree_comparison we have to simulate
3133 that with expression code iteration. */
3134 (for cmp (tcc_comparison)
3135 icmp (inverted_tcc_comparison)
3136 ncmp (inverted_tcc_comparison_with_nans)
3137 /* Ideally we'd like to combine the following two patterns
3138 and handle some more cases by using
3139 (logical_inverted_value (cmp @0 @1))
3140 here, but for that genmatch would need to "inline" that.
3141 For now implement what forward_propagate_comparison did. */
3142 (simplify
3143 (bit_not (cmp @0 @1))
3144 (if (VECTOR_TYPE_P (type)
3145 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3146 /* Comparison inversion may be impossible for trapping math;
3147 invert_tree_comparison will tell us. But we can't use
3148 a computed operator in the replacement tree, thus we have
3149 to play the trick below. */
3150 (with { enum tree_code ic = invert_tree_comparison
3151 (cmp, HONOR_NANS (@0)); }
3152 (if (ic == icmp)
3153 (icmp @0 @1)
3154 (if (ic == ncmp)
3155 (ncmp @0 @1))))))
3156 (simplify
3157 (bit_xor (cmp @0 @1) integer_truep)
3158 (with { enum tree_code ic = invert_tree_comparison
3159 (cmp, HONOR_NANS (@0)); }
3160 (if (ic == icmp)
3161 (icmp @0 @1)
3162 (if (ic == ncmp)
3163 (ncmp @0 @1))))))
3164
3165 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3166 ??? The transformation is valid for the other operators if overflow
3167 is undefined for the type, but performing it here badly interacts
3168 with the transformation in fold_cond_expr_with_comparison which
3169 attempts to synthesize ABS_EXPR. */
3170 (for cmp (eq ne)
3171 (for sub (minus pointer_diff)
3172 (simplify
3173 (cmp (sub@2 @0 @1) integer_zerop)
3174 (if (single_use (@2))
3175 (cmp @0 @1)))))
3176
3177 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3178 signed arithmetic case. That form is created by the compiler
3179 often enough for folding it to be of value. One example is in
3180 computing loop trip counts after Operator Strength Reduction. */
3181 (for cmp (simple_comparison)
3182 scmp (swapped_simple_comparison)
3183 (simplify
3184 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
3185 /* Handle unfolded multiplication by zero. */
3186 (if (integer_zerop (@1))
3187 (cmp @1 @2)
3188 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3189 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3190 && single_use (@3))
3191 /* If @1 is negative we swap the sense of the comparison. */
3192 (if (tree_int_cst_sgn (@1) < 0)
3193 (scmp @0 @2)
3194 (cmp @0 @2))))))
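/* Illustrative examples, assuming signed int with undefined overflow:
   x * 4 > 0 becomes x > 0, while x * -2 > 0 becomes x < 0 because
   the negative factor swaps the sense of the comparison.  */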
3195
3196 /* Simplify comparison of something with itself. For IEEE
3197 floating-point, we can only do some of these simplifications. */
3198 (for cmp (eq ge le)
3199 (simplify
3200 (cmp @0 @0)
3201 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
3202 || ! HONOR_NANS (@0))
3203 { constant_boolean_node (true, type); }
3204 (if (cmp != EQ_EXPR)
3205 (eq @0 @0)))))
3206 (for cmp (ne gt lt)
3207 (simplify
3208 (cmp @0 @0)
3209 (if (cmp != NE_EXPR
3210 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
3211 || ! HONOR_NANS (@0))
3212 { constant_boolean_node (false, type); })))
3213 (for cmp (unle unge uneq)
3214 (simplify
3215 (cmp @0 @0)
3216 { constant_boolean_node (true, type); }))
3217 (for cmp (unlt ungt)
3218 (simplify
3219 (cmp @0 @0)
3220 (unordered @0 @0)))
3221 (simplify
3222 (ltgt @0 @0)
3223 (if (!flag_trapping_math)
3224 { constant_boolean_node (false, type); }))
3225
3226 /* Fold ~X op ~Y as Y op X. */
3227 (for cmp (simple_comparison)
3228 (simplify
3229 (cmp (bit_not@2 @0) (bit_not@3 @1))
3230 (if (single_use (@2) && single_use (@3))
3231 (cmp @1 @0))))
3232
3233 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
3234 (for cmp (simple_comparison)
3235 scmp (swapped_simple_comparison)
3236 (simplify
3237 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3238 (if (single_use (@2)
3239 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
3240 (scmp @0 (bit_not @1)))))
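/* For example, ~x < 5 becomes x > ~5, i.e. x > -6 for a two's
   complement int, since bit_not is order-reversing.  */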
3241
3242 (for cmp (simple_comparison)
3243 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3244 (simplify
3245 (cmp (convert@2 @0) (convert? @1))
3246 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3247 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3248 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3249 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3250 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3251 (with
3252 {
3253 tree type1 = TREE_TYPE (@1);
3254 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3255 {
3256 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3257 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3258 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3259 type1 = float_type_node;
3260 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3261 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3262 type1 = double_type_node;
3263 }
3264 tree newtype
3265 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
3266 ? TREE_TYPE (@0) : type1);
3267 }
3268 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3269 (cmp (convert:newtype @0) (convert:newtype @1))))))
3270
3271 (simplify
3272 (cmp @0 REAL_CST@1)
3273 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
3274 (switch
3275 /* a CMP (-0) -> a CMP 0 */
3276 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3277 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3278 /* x != NaN is always true, other ops are always false. */
3279 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3280 && ! HONOR_SNANS (@1))
3281 { constant_boolean_node (cmp == NE_EXPR, type); })
3282 /* Fold comparisons against infinity. */
3283 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3284 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3285 (with
3286 {
3287 REAL_VALUE_TYPE max;
3288 enum tree_code code = cmp;
3289 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3290 if (neg)
3291 code = swap_tree_comparison (code);
3292 }
3293 (switch
3294 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
3295 (if (code == GT_EXPR
3296 && !(HONOR_NANS (@0) && flag_trapping_math))
3297 { constant_boolean_node (false, type); })
3298 (if (code == LE_EXPR)
3299 /* x <= +Inf is always true, if we don't care about NaNs. */
3300 (if (! HONOR_NANS (@0))
3301 { constant_boolean_node (true, type); }
3302 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3303 an "invalid" exception. */
3304 (if (!flag_trapping_math)
3305 (eq @0 @0))))
3306 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3307 for == this introduces an exception for x a NaN. */
3308 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3309 || code == GE_EXPR)
3310 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3311 (if (neg)
3312 (lt @0 { build_real (TREE_TYPE (@0), max); })
3313 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3314 /* x < +Inf is always equal to x <= DBL_MAX. */
3315 (if (code == LT_EXPR)
3316 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3317 (if (neg)
3318 (ge @0 { build_real (TREE_TYPE (@0), max); })
3319 (le @0 { build_real (TREE_TYPE (@0), max); }))))
3320 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3321 an exception for x a NaN so use an unordered comparison. */
3322 (if (code == NE_EXPR)
3323 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3324 (if (! HONOR_NANS (@0))
3325 (if (neg)
3326 (ge @0 { build_real (TREE_TYPE (@0), max); })
3327 (le @0 { build_real (TREE_TYPE (@0), max); }))
3328 (if (neg)
3329 (unge @0 { build_real (TREE_TYPE (@0), max); })
3330 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
3331
3332 /* If this is a comparison of a real constant with a PLUS_EXPR
3333 or a MINUS_EXPR of a real constant, we can convert it into a
3334 comparison with a revised real constant as long as no overflow
3335 occurs when unsafe_math_optimizations are enabled. */
3336 (if (flag_unsafe_math_optimizations)
3337 (for op (plus minus)
3338 (simplify
3339 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3340 (with
3341 {
3342 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3343 TREE_TYPE (@1), @2, @1);
3344 }
3345 (if (tem && !TREE_OVERFLOW (tem))
3346 (cmp @0 { tem; }))))))
3347
3348 /* Likewise, we can simplify a comparison of a real constant with
3349 a MINUS_EXPR whose first operand is also a real constant, i.e.
3350 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3351 floating-point types only if -fassociative-math is set. */
3352 (if (flag_associative_math)
3353 (simplify
3354 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
3355 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
3356 (if (tem && !TREE_OVERFLOW (tem))
3357 (cmp { tem; } @1)))))
3358
3359 /* Fold comparisons against built-in math functions. */
3360 (if (flag_unsafe_math_optimizations
3361 && ! flag_errno_math)
3362 (for sq (SQRT)
3363 (simplify
3364 (cmp (sq @0) REAL_CST@1)
3365 (switch
3366 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3367 (switch
3368 /* sqrt(x) == y, < y or <= y is always false, if y is negative. */
3369 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
3370 { constant_boolean_node (false, type); })
3371 /* sqrt(x) > y is always true, if y is negative and we
3372 don't care about NaNs, i.e. negative values of x. */
3373 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3374 { constant_boolean_node (true, type); })
3375 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3376 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3377 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3378 (switch
3379 /* sqrt(x) < 0 is always false. */
3380 (if (cmp == LT_EXPR)
3381 { constant_boolean_node (false, type); })
3382 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3383 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3384 { constant_boolean_node (true, type); })
3385 /* sqrt(x) <= 0 -> x == 0. */
3386 (if (cmp == LE_EXPR)
3387 (eq @0 @1))
3388 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3389 == or !=. In the last case:
3390
3391 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3392
3393 if x is negative or NaN. Due to -funsafe-math-optimizations,
3394 the results for other x follow from natural arithmetic. */
3395 (cmp @0 @1)))
3396 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3397 (with
3398 {
3399 REAL_VALUE_TYPE c2;
3400 real_arithmetic (&c2, MULT_EXPR,
3401 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3402 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3403 }
3404 (if (REAL_VALUE_ISINF (c2))
3405 /* sqrt(x) > y is x == +Inf, when y is very large. */
3406 (if (HONOR_INFINITIES (@0))
3407 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3408 { constant_boolean_node (false, type); })
3409 /* sqrt(x) > c is the same as x > c*c. */
3410 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3411 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3412 (with
3413 {
3414 REAL_VALUE_TYPE c2;
3415 real_arithmetic (&c2, MULT_EXPR,
3416 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3417 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3418 }
3419 (if (REAL_VALUE_ISINF (c2))
3420 (switch
3421 /* sqrt(x) < y is always true, when y is a very large
3422 value and we don't care about NaNs or Infinities. */
3423 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3424 { constant_boolean_node (true, type); })
3425 /* sqrt(x) < y is x != +Inf when y is very large and we
3426 don't care about NaNs. */
3427 (if (! HONOR_NANS (@0))
3428 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3429 /* sqrt(x) < y is x >= 0 when y is very large and we
3430 don't care about Infinities. */
3431 (if (! HONOR_INFINITIES (@0))
3432 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3433 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3434 (if (GENERIC)
3435 (truth_andif
3436 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3437 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3438 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3439 (if (! HONOR_NANS (@0))
3440 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3441 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3442 (if (GENERIC)
3443 (truth_andif
3444 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3445 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3446 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3447 (simplify
3448 (cmp (sq @0) (sq @1))
3449 (if (! HONOR_NANS (@0))
3450 (cmp @0 @1))))))
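/* Illustrative examples of the sqrt patterns above, under
   -funsafe-math-optimizations and -fno-math-errno:
   sqrt(x) > 3.0 becomes x > 9.0, sqrt(x) < -1.0 folds to false,
   and sqrt(x) > -1.0 becomes x >= 0.0 when NaNs are honored.  */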
3451
3452 /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
3453 (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
3454 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
3455 (simplify
3456 (cmp (float@0 @1) (float @2))
3457 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
3458 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3459 (with
3460 {
3461 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
3462 tree type1 = TREE_TYPE (@1);
3463 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
3464 tree type2 = TREE_TYPE (@2);
3465 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
3466 }
3467 (if (fmt.can_represent_integral_type_p (type1)
3468 && fmt.can_represent_integral_type_p (type2))
3469 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
3470 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
3471 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
3472 && type1_signed_p >= type2_signed_p)
3473 (icmp @1 (convert @2))
3474 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
3475 && type1_signed_p <= type2_signed_p)
3476 (icmp (convert:type2 @1) @2)
3477 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
3478 && type1_signed_p == type2_signed_p)
3479 (icmp @1 @2))))))))))
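/* For example, with 32-bit int i, j and IEEE double (which can
   represent every 32-bit integer exactly), (double) i < (double) j
   becomes i < j, and (double) i unordered (double) j folds to
   false since neither operand can be a NaN.  */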
3480
3481 /* Optimize various special cases of (FTYPE) N CMP CST. */
3482 (for cmp (lt le eq ne ge gt)
3483 icmp (le le eq ne ge ge)
3484 (simplify
3485 (cmp (float @0) REAL_CST@1)
3486 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3487 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3488 (with
3489 {
3490 tree itype = TREE_TYPE (@0);
3491 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3492 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3493 /* Be careful to preserve any potential exceptions due to
3494 NaNs. qNaNs are ok in == or != context.
3495 TODO: relax under -fno-trapping-math or
3496 -fno-signaling-nans. */
3497 bool exception_p
3498 = real_isnan (cst) && (cst->signalling
3499 || (cmp != EQ_EXPR && cmp != NE_EXPR));
3500 }
3501 /* TODO: allow non-fitting itype and SNaNs when
3502 -fno-trapping-math. */
3503 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
3504 (with
3505 {
3506 signop isign = TYPE_SIGN (itype);
3507 REAL_VALUE_TYPE imin, imax;
3508 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3509 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3510
3511 REAL_VALUE_TYPE icst;
3512 if (cmp == GT_EXPR || cmp == GE_EXPR)
3513 real_ceil (&icst, fmt, cst);
3514 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3515 real_floor (&icst, fmt, cst);
3516 else
3517 real_trunc (&icst, fmt, cst);
3518
3519 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
3520
3521 bool overflow_p = false;
3522 wide_int icst_val
3523 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3524 }
3525 (switch
3526 /* Optimize cases when CST is outside of ITYPE's range. */
3527 (if (real_compare (LT_EXPR, cst, &imin))
3528 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3529 type); })
3530 (if (real_compare (GT_EXPR, cst, &imax))
3531 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3532 type); })
3533 /* Remove cast if CST is an integer representable by ITYPE. */
3534 (if (cst_int_p)
3535 (cmp @0 { gcc_assert (!overflow_p);
3536 wide_int_to_tree (itype, icst_val); })
3537 )
3538 /* When CST is fractional, optimize
3539 (FTYPE) N == CST -> 0
3540 (FTYPE) N != CST -> 1. */
3541 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3542 { constant_boolean_node (cmp == NE_EXPR, type); })
3543 /* Otherwise replace with a sensible integer constant. */
3544 (with
3545 {
3546 gcc_checking_assert (!overflow_p);
3547 }
3548 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
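/* Illustrative examples, assuming 32-bit int i and IEEE double:
   (double) i < 1.5 becomes i <= 1, (double) i == 1.5 folds to
   false, and (double) i > 1.0e20 folds to false because the
   constant is above INT_MAX.  */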
3549
3550 /* Fold A /[ex] B CMP C to A CMP B * C. */
3551 (for cmp (eq ne)
3552 (simplify
3553 (cmp (exact_div @0 @1) INTEGER_CST@2)
3554 (if (!integer_zerop (@1))
3555 (if (wi::to_wide (@2) == 0)
3556 (cmp @0 @2)
3557 (if (TREE_CODE (@1) == INTEGER_CST)
3558 (with
3559 {
3560 wi::overflow_type ovf;
3561 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3562 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3563 }
3564 (if (ovf)
3565 { constant_boolean_node (cmp == NE_EXPR, type); }
3566 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3567 (for cmp (lt le gt ge)
3568 (simplify
3569 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
3570 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3571 (with
3572 {
3573 wi::overflow_type ovf;
3574 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3575 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3576 }
3577 (if (ovf)
3578 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3579 TYPE_SIGN (TREE_TYPE (@2)))
3580 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3581 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
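/* E.g. (a /[ex] 4) == 3 becomes a == 12, and if the product
   B * C overflows, the equality folds to false (inequality to
   true) since an exact division can never yield that quotient.  */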
3582
3583 /* Unordered tests if either argument is a NaN. */
3584 (simplify
3585 (bit_ior (unordered @0 @0) (unordered @1 @1))
3586 (if (types_match (@0, @1))
3587 (unordered @0 @1)))
3588 (simplify
3589 (bit_and (ordered @0 @0) (ordered @1 @1))
3590 (if (types_match (@0, @1))
3591 (ordered @0 @1)))
3592 (simplify
3593 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3594 @2)
3595 (simplify
3596 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3597 @2)
3598
3599 /* Simple range test simplifications. */
3600 /* A < B || A >= B -> true. */
3601 (for test1 (lt le le le ne ge)
3602 test2 (ge gt ge ne eq ne)
3603 (simplify
3604 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3605 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3606 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3607 { constant_boolean_node (true, type); })))
3608 /* A < B && A >= B -> false. */
3609 (for test1 (lt lt lt le ne eq)
3610 test2 (ge gt eq gt eq gt)
3611 (simplify
3612 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3613 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3614 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3615 { constant_boolean_node (false, type); })))
3616
3617 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3618 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3619
3620 Note that comparisons
3621 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3622 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3623 will be canonicalized to the forms above, so there's no need to
3624 consider them here.
3625 */
3626
3627 (for cmp (le gt)
3628 eqcmp (eq ne)
3629 (simplify
3630 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3631 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3632 (with
3633 {
3634 tree ty = TREE_TYPE (@0);
3635 unsigned prec = TYPE_PRECISION (ty);
3636 wide_int mask = wi::to_wide (@2, prec);
3637 wide_int rhs = wi::to_wide (@3, prec);
3638 signop sgn = TYPE_SIGN (ty);
3639 }
3640 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3641 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3642 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3643 { build_zero_cst (ty); }))))))
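/* Worked example with N = 4 and K = 2, for unsigned int a:
   (a & 15) <= 3 becomes (a & 12) == 0 and (a & 15) > 3 becomes
   (a & 12) != 0, testing only the bits at or above 2**K.  */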
3644
3645 /* -A CMP -B -> B CMP A. */
3646 (for cmp (tcc_comparison)
3647 scmp (swapped_tcc_comparison)
3648 (simplify
3649 (cmp (negate @0) (negate @1))
3650 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3651 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3652 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3653 (scmp @0 @1)))
3654 (simplify
3655 (cmp (negate @0) CONSTANT_CLASS_P@1)
3656 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3657 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3658 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3659 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
3660 (if (tem && !TREE_OVERFLOW (tem))
3661 (scmp @0 { tem; }))))))
3662
3663 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3664 (for op (eq ne)
3665 (simplify
3666 (op (abs @0) zerop@1)
3667 (op @0 @1)))
3668
3669 /* From fold_sign_changed_comparison and fold_widened_comparison.
3670 FIXME: the lack of symmetry is disturbing. */
3671 (for cmp (simple_comparison)
3672 (simplify
3673 (cmp (convert@0 @00) (convert?@1 @10))
3674 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3675 /* Disable this optimization if we're casting a function pointer
3676 type on targets that require function pointer canonicalization. */
3677 && !(targetm.have_canonicalize_funcptr_for_compare ()
3678 && ((POINTER_TYPE_P (TREE_TYPE (@00))
3679 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
3680 || (POINTER_TYPE_P (TREE_TYPE (@10))
3681 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
3682 && single_use (@0))
3683 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3684 && (TREE_CODE (@10) == INTEGER_CST
3685 || @1 != @10)
3686 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3687 || cmp == NE_EXPR
3688 || cmp == EQ_EXPR)
3689 && !POINTER_TYPE_P (TREE_TYPE (@00)))
3690 /* ??? The special-casing of INTEGER_CST conversion was in the original
3691 code and is kept here to avoid a spurious overflow flag on the resulting
3692 constant which fold_convert produces. */
3693 (if (TREE_CODE (@1) == INTEGER_CST)
3694 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3695 TREE_OVERFLOW (@1)); })
3696 (cmp @00 (convert @1)))
3697
3698 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3699 /* If possible, express the comparison in the shorter mode. */
3700 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
3701 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3702 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3703 && TYPE_UNSIGNED (TREE_TYPE (@00))))
3704 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3705 || ((TYPE_PRECISION (TREE_TYPE (@00))
3706 >= TYPE_PRECISION (TREE_TYPE (@10)))
3707 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3708 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3709 || (TREE_CODE (@10) == INTEGER_CST
3710 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3711 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3712 (cmp @00 (convert @10))
3713 (if (TREE_CODE (@10) == INTEGER_CST
3714 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3715 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3716 (with
3717 {
3718 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3719 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3720 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3721 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3722 }
3723 (if (above || below)
3724 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3725 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3726 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3727 { constant_boolean_node (above ? true : false, type); }
3728 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3729 { constant_boolean_node (above ? false : true, type); }))))))))))))
3730
3731 (for cmp (eq ne)
3732 /* A local variable can never be pointed to by
3733 the default SSA name of an incoming parameter.
3734 SSA names are canonicalized to the second operand position. */
3735 (simplify
3736 (cmp addr@0 SSA_NAME@1)
3737 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3738 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3739 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3740 (if (TREE_CODE (base) == VAR_DECL
3741 && auto_var_in_fn_p (base, current_function_decl))
3742 (if (cmp == NE_EXPR)
3743 { constant_boolean_node (true, type); }
3744 { constant_boolean_node (false, type); }))))))
3745
3746 /* Equality compare simplifications from fold_binary */
3747 (for cmp (eq ne)
3748
3749 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3750 Similarly for NE_EXPR. */
3751 (simplify
3752 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3753 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
3754 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
3755 { constant_boolean_node (cmp == NE_EXPR, type); }))
3756
3757 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3758 (simplify
3759 (cmp (bit_xor @0 @1) integer_zerop)
3760 (cmp @0 @1))
3761
3762 /* (X ^ Y) == Y becomes X == 0.
3763 Likewise (X ^ Y) == X becomes Y == 0. */
3764 (simplify
3765 (cmp:c (bit_xor:c @0 @1) @0)
3766 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3767
3768 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3769 (simplify
3770 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3771 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
3772 (cmp @0 (bit_xor @1 (convert @2)))))
3773
3774 (simplify
3775 (cmp (convert? addr@0) integer_zerop)
3776 (if (tree_single_nonzero_warnv_p (@0, NULL))
3777 { constant_boolean_node (cmp == NE_EXPR, type); })))
3778
3779 /* If we have (A & C) == C where C is a power of 2, convert this into
3780 (A & C) != 0. Similarly for NE_EXPR. */
3781 (for cmp (eq ne)
3782 icmp (ne eq)
3783 (simplify
3784 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3785 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
3786
3787 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3788 convert this into a shift followed by ANDing with D. */
3789 (simplify
3790 (cond
3791 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
3792 INTEGER_CST@2 integer_zerop)
3793 (if (integer_pow2p (@2))
3794 (with {
3795 int shift = (wi::exact_log2 (wi::to_wide (@2))
3796 - wi::exact_log2 (wi::to_wide (@1)));
3797 }
3798 (if (shift > 0)
3799 (bit_and
3800 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3801 (bit_and
3802 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
3803 @2)))))
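/* For example, (a & 4) != 0 ? 16 : 0 becomes (a << 2) & 16
   (shift = log2(16) - log2(4) = 2), while (a & 16) != 0 ? 4 : 0
   becomes (a >> 2) & 4.  */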
3804
3805 /* If we have (A & C) != 0 where C is the sign bit of A, convert
3806 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3807 (for cmp (eq ne)
3808 ncmp (ge lt)
3809 (simplify
3810 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3811 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3812 && type_has_mode_precision_p (TREE_TYPE (@0))
3813 && element_precision (@2) >= element_precision (@0)
3814 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
3815 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3816 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
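/* E.g. for 32-bit unsigned int a, (a & 0x80000000) != 0 becomes
   (int) a < 0 and (a & 0x80000000) == 0 becomes (int) a >= 0.  */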
3817
3818 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
3819 this into a right shift or sign extension followed by ANDing with C. */
3820 (simplify
3821 (cond
3822 (lt @0 integer_zerop)
3823 INTEGER_CST@1 integer_zerop)
3824 (if (integer_pow2p (@1)
3825 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
3826 (with {
3827 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
3828 }
3829 (if (shift >= 0)
3830 (bit_and
3831 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3832 @1)
3833 /* Otherwise the conversion type must be wider than TREE_TYPE (@0), and
3834 pure sign extension followed by AND with C achieves the effect. */
3835 (bit_and (convert @0) @1)))))
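/* E.g. for 32-bit signed int a, a < 0 ? 16 : 0 becomes
   (a >> 27) & 16: the arithmetic shift replicates the sign bit
   down to bit position log2(16) = 4.  */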
3836
3837 /* When the addresses are not directly of decls compare base and offset.
3838 This implements some of the remaining address-comparison folding
3839 from fold_comparison, though not all of it. Still, it is good
3840 enough to keep fold_stmt from regressing when not dispatching to fold_binary. */
3841 (for cmp (simple_comparison)
3842 (simplify
3843 (cmp (convert1?@2 addr@0) (convert2? addr@1))
3844 (with
3845 {
3846 poly_int64 off0, off1;
3847 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3848 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3849 if (base0 && TREE_CODE (base0) == MEM_REF)
3850 {
3851 off0 += mem_ref_offset (base0).force_shwi ();
3852 base0 = TREE_OPERAND (base0, 0);
3853 }
3854 if (base1 && TREE_CODE (base1) == MEM_REF)
3855 {
3856 off1 += mem_ref_offset (base1).force_shwi ();
3857 base1 = TREE_OPERAND (base1, 0);
3858 }
3859 }
3860 (if (base0 && base1)
3861 (with
3862 {
3863 int equal = 2;
3864 /* Punt in GENERIC on variables with value expressions;
3865 the value expressions might point to fields/elements
3866 of other vars etc. */
3867 if (GENERIC
3868 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3869 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3870 ;
3871 else if (decl_in_symtab_p (base0)
3872 && decl_in_symtab_p (base1))
3873 equal = symtab_node::get_create (base0)
3874 ->equal_address_to (symtab_node::get_create (base1));
3875 else if ((DECL_P (base0)
3876 || TREE_CODE (base0) == SSA_NAME
3877 || TREE_CODE (base0) == STRING_CST)
3878 && (DECL_P (base1)
3879 || TREE_CODE (base1) == SSA_NAME
3880 || TREE_CODE (base1) == STRING_CST))
3881 equal = (base0 == base1);
3882 }
3883 (if (equal == 1
3884 && (cmp == EQ_EXPR || cmp == NE_EXPR
3885 /* If the offsets are equal we can ignore overflow. */
3886 || known_eq (off0, off1)
3887 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3888 /* Or if we compare using pointers to decls or strings. */
3889 || (POINTER_TYPE_P (TREE_TYPE (@2))
3890 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
3891 (switch
3892 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3893 { constant_boolean_node (known_eq (off0, off1), type); })
3894 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3895 { constant_boolean_node (known_ne (off0, off1), type); })
3896 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
3897 { constant_boolean_node (known_lt (off0, off1), type); })
3898 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
3899 { constant_boolean_node (known_le (off0, off1), type); })
3900 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
3901 { constant_boolean_node (known_ge (off0, off1), type); })
3902 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
3903 { constant_boolean_node (known_gt (off0, off1), type); }))
3904 (if (equal == 0
3905 && DECL_P (base0) && DECL_P (base1)
3906 /* If we compare this as integers require equal offset. */
3907 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
3908 || known_eq (off0, off1)))
3909 (switch
3910 (if (cmp == EQ_EXPR)
3911 { constant_boolean_node (false, type); })
3912 (if (cmp == NE_EXPR)
3913 { constant_boolean_node (true, type); })))))))))
3914
3915 /* Simplify pointer equality compares using PTA. */
3916 (for neeq (ne eq)
3917 (simplify
3918 (neeq @0 @1)
3919 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3920 && ptrs_compare_unequal (@0, @1))
3921 { constant_boolean_node (neeq != EQ_EXPR, type); })))
3922
3923 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
3924 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3925 Disable the transform if either operand is pointer to function.
3926 This broke pr22051-2.c for arm where function pointer
3927 canonicalization is not wanted. */
3928
3929 (for cmp (ne eq)
3930 (simplify
3931 (cmp (convert @0) INTEGER_CST@1)
3932 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
3933 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3934 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3935 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3936 && POINTER_TYPE_P (TREE_TYPE (@1))
3937 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
3938 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
3939 (cmp @0 (convert @1)))))
3940
3941 /* Non-equality compare simplifications from fold_binary */
3942 (for cmp (lt gt le ge)
3943 /* Comparisons with the highest or lowest possible integer of
3944 the specified precision will have known values. */
3945 (simplify
3946 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
3947 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
3948 || POINTER_TYPE_P (TREE_TYPE (@1))
3949 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
3950 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3951 (with
3952 {
3953 tree cst = uniform_integer_cst_p (@1);
3954 tree arg1_type = TREE_TYPE (cst);
3955 unsigned int prec = TYPE_PRECISION (arg1_type);
3956 wide_int max = wi::max_value (arg1_type);
3957 wide_int signed_max = wi::max_value (prec, SIGNED);
3958 wide_int min = wi::min_value (arg1_type);
3959 }
3960 (switch
3961 (if (wi::to_wide (cst) == max)
3962 (switch
3963 (if (cmp == GT_EXPR)
3964 { constant_boolean_node (false, type); })
3965 (if (cmp == GE_EXPR)
3966 (eq @2 @1))
3967 (if (cmp == LE_EXPR)
3968 { constant_boolean_node (true, type); })
3969 (if (cmp == LT_EXPR)
3970 (ne @2 @1))))
3971 (if (wi::to_wide (cst) == min)
3972 (switch
3973 (if (cmp == LT_EXPR)
3974 { constant_boolean_node (false, type); })
3975 (if (cmp == LE_EXPR)
3976 (eq @2 @1))
3977 (if (cmp == GE_EXPR)
3978 { constant_boolean_node (true, type); })
3979 (if (cmp == GT_EXPR)
3980 (ne @2 @1))))
3981 (if (wi::to_wide (cst) == max - 1)
3982 (switch
3983 (if (cmp == GT_EXPR)
3984 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
3985 wide_int_to_tree (TREE_TYPE (cst),
3986 wi::to_wide (cst)
3987 + 1)); }))
3988 (if (cmp == LE_EXPR)
3989 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
3990 wide_int_to_tree (TREE_TYPE (cst),
3991 wi::to_wide (cst)
3992 + 1)); }))))
3993 (if (wi::to_wide (cst) == min + 1)
3994 (switch
3995 (if (cmp == GE_EXPR)
3996 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
3997 wide_int_to_tree (TREE_TYPE (cst),
3998 wi::to_wide (cst)
3999 - 1)); }))
4000 (if (cmp == LT_EXPR)
4001 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4002 wide_int_to_tree (TREE_TYPE (cst),
4003 wi::to_wide (cst)
4004 - 1)); }))))
4005 (if (wi::to_wide (cst) == signed_max
4006 && TYPE_UNSIGNED (arg1_type)
4007 /* We will flip the signedness of the comparison operator
4008 associated with the mode of @1, so the sign bit is
4009 specified by this mode. Check that @1 is the signed
4010 max associated with this sign bit. */
4011 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
4012 /* signed_type does not work on pointer types. */
4013 && INTEGRAL_TYPE_P (arg1_type))
4014 /* The following case also applies to X < signed_max+1
4015 and X >= signed_max+1 because of previous transformations. */
4016 (if (cmp == LE_EXPR || cmp == GT_EXPR)
4017 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
4018 (switch
4019 (if (cst == @1 && cmp == LE_EXPR)
4020 (ge (convert:st @0) { build_zero_cst (st); }))
4021 (if (cst == @1 && cmp == GT_EXPR)
4022 (lt (convert:st @0) { build_zero_cst (st); }))
4023 (if (cmp == LE_EXPR)
4024 (ge (view_convert:st @0) { build_zero_cst (st); }))
4025 (if (cmp == GT_EXPR)
4026 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
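/* Illustrative examples, for unsigned char c and unsigned int u:
   c <= 255 folds to true, c < 255 becomes c != 255, c >= 1
   becomes c != 0, and u > 0x7fffffff (the signed maximum)
   becomes (int) u < 0.  */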
4027
4028 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
4029 /* If the second operand is NaN, the result is constant. */
4030 (simplify
4031 (cmp @0 REAL_CST@1)
4032 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4033 && (cmp != LTGT_EXPR || ! flag_trapping_math))
4034 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
4035 ? false : true, type); })))
4036
4037 /* bool_var != 0 becomes bool_var. */
4038 (simplify
4039 (ne @0 integer_zerop)
4040 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4041 && types_match (type, TREE_TYPE (@0)))
4042 (non_lvalue @0)))
4043 /* bool_var == 1 becomes bool_var. */
4044 (simplify
4045 (eq @0 integer_onep)
4046 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4047 && types_match (type, TREE_TYPE (@0)))
4048 (non_lvalue @0)))
4049 /* Do not handle
4050 bool_var == 0 becomes !bool_var or
4051 bool_var != 1 becomes !bool_var
4052 here because that is only good in assignment context: as long
4053 as we require a tcc_comparison in GIMPLE_CONDs we'd
4054 replace if (x == 0) with tem = ~x; if (tem != 0), which is
4055 clearly less optimal and which we'll transform again in forwprop. */
4056
4057 /* When one argument is a constant, overflow detection can be simplified.
4058 Currently restricted to single use so as not to interfere too much with
4059 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4060 A + CST CMP A -> A CMP' CST' */
4061 (for cmp (lt le ge gt)
4062 out (gt gt le le)
4063 (simplify
4064 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
4065 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4066 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
4067 && wi::to_wide (@1) != 0
4068 && single_use (@2))
4069 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4070 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4071 wi::max_value (prec, UNSIGNED)
4072 - wi::to_wide (@1)); })))))
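/* E.g. for unsigned int a, the overflow check a + 5 < a becomes
   a > UINT_MAX - 5, eliminating the addition.  */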
4073
4074 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4075 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4076 expects the long form, so we restrict the transformation for now. */
4077 (for cmp (gt le)
4078 (simplify
4079 (cmp:c (minus@2 @0 @1) @0)
4080 (if (single_use (@2)
4081 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4082 && TYPE_UNSIGNED (TREE_TYPE (@0))
4083 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4084 (cmp @1 @0))))
4085
4086 /* Testing for overflow is unnecessary if we already know the result. */
4087 /* A - B > A */
4088 (for cmp (gt le)
4089 out (ne eq)
4090 (simplify
4091 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
4092 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4093 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4094 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4095 /* A + B < A */
4096 (for cmp (lt ge)
4097 out (ne eq)
4098 (simplify
4099 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
4100 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4101 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4102 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4103
4104 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
4105 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
4106 (for cmp (lt ge)
4107 out (ne eq)
4108 (simplify
4109 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
4110 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4111 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4112 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
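/* E.g. for unsigned a and b, -1u / b < a becomes a test of the
   overflow result of IFN_MUL_OVERFLOW (a, b), matching what
   __builtin_mul_overflow (a, b, &unused) would compute.  */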
4113
4114 /* Simplification of math builtins. These rules must all be optimizations
4115 as well as IL simplifications. If there is a possibility that the new
4116 form could be a pessimization, the rule should go in the canonicalization
4117 section that follows this one.
4118
4119 Rules can generally go in this section if they satisfy one of
4120 the following:
4121
4122 - the rule describes an identity
4123
4124 - the rule replaces calls with something as simple as addition or
4125 multiplication
4126
4127 - the rule contains unary calls only and simplifies the surrounding
4128 arithmetic. (The idea here is to exclude non-unary calls in which
4129 one operand is constant and in which the call is known to be cheap
4130 when the operand has that value.) */
4131
4132 (if (flag_unsafe_math_optimizations)
4133 /* Simplify sqrt(x) * sqrt(x) -> x. */
4134 (simplify
4135 (mult (SQRT_ALL@1 @0) @1)
4136 (if (!HONOR_SNANS (type))
4137 @0))
4138
4139 (for op (plus minus)
4140 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
4141 (simplify
4142 (op (rdiv @0 @1)
4143 (rdiv @2 @1))
4144 (rdiv (op @0 @2) @1)))
4145
4146 (for cmp (lt le gt ge)
4147 neg_cmp (gt ge lt le)
4148 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
4149 (simplify
4150 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
4151 (with
4152 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
4153 (if (tem
4154 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
4155 || (real_zerop (tem) && !real_zerop (@1))))
4156 (switch
4157 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
4158 (cmp @0 { tem; }))
4159 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
4160 (neg_cmp @0 { tem; })))))))
4161
4162 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
4163 (for root (SQRT CBRT)
4164 (simplify
4165 (mult (root:s @0) (root:s @1))
4166 (root (mult @0 @1))))
4167
4168 /* Simplify expN(x) * expN(y) -> expN(x+y). */
4169 (for exps (EXP EXP2 EXP10 POW10)
4170 (simplify
4171 (mult (exps:s @0) (exps:s @1))
4172 (exps (plus @0 @1))))
4173
4174 /* Simplify a/root(b/c) into a*root(c/b). */
4175 (for root (SQRT CBRT)
4176 (simplify
4177 (rdiv @0 (root:s (rdiv:s @1 @2)))
4178 (mult @0 (root (rdiv @2 @1)))))
4179
4180 /* Simplify x/expN(y) into x*expN(-y). */
4181 (for exps (EXP EXP2 EXP10 POW10)
4182 (simplify
4183 (rdiv @0 (exps:s @1))
4184 (mult @0 (exps (negate @1)))))
4185
4186 (for logs (LOG LOG2 LOG10 LOG10)
4187 exps (EXP EXP2 EXP10 POW10)
4188 /* logN(expN(x)) -> x. */
4189 (simplify
4190 (logs (exps @0))
4191 @0)
4192 /* expN(logN(x)) -> x. */
4193 (simplify
4194 (exps (logs @0))
4195 @0))
4196
4197 /* Optimize logN(func()) for various exponential functions. We
4198 want to determine the value "x" and the power "exponent" in
4199 order to transform logN(x**exponent) into exponent*logN(x). */
4200 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
4201 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
4202 (simplify
4203 (logs (exps @0))
4204 (if (SCALAR_FLOAT_TYPE_P (type))
4205 (with {
4206 tree x;
4207 switch (exps)
4208 {
4209 CASE_CFN_EXP:
4210 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
4211 x = build_real_truncate (type, dconst_e ());
4212 break;
4213 CASE_CFN_EXP2:
4214 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
4215 x = build_real (type, dconst2);
4216 break;
4217 CASE_CFN_EXP10:
4218 CASE_CFN_POW10:
4219 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
4220 {
4221 REAL_VALUE_TYPE dconst10;
4222 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4223 x = build_real (type, dconst10);
4224 }
4225 break;
4226 default:
4227 gcc_unreachable ();
4228 }
4229 }
4230 (mult (logs { x; }) @0)))))
4231
4232 (for logs (LOG LOG
4233 LOG2 LOG2
4234 LOG10 LOG10)
4235 exps (SQRT CBRT)
4236 (simplify
4237 (logs (exps @0))
4238 (if (SCALAR_FLOAT_TYPE_P (type))
4239 (with {
4240 tree x;
4241 switch (exps)
4242 {
4243 CASE_CFN_SQRT:
4244 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
4245 x = build_real (type, dconsthalf);
4246 break;
4247 CASE_CFN_CBRT:
4248 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
4249 x = build_real_truncate (type, dconst_third ());
4250 break;
4251 default:
4252 gcc_unreachable ();
4253 }
4254 }
4255 (mult { x; } (logs @0))))))
4256
4257 /* logN(pow(x,exponent)) -> exponent*logN(x). */
4258 (for logs (LOG LOG2 LOG10)
4259 pows (POW)
4260 (simplify
4261 (logs (pows @0 @1))
4262 (mult @1 (logs @0))))
4263
4264 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4265 or if C is a positive power of 2,
4266 pow(C,x) -> exp2(log2(C)*x). */
4267 #if GIMPLE
4268 (for pows (POW)
4269 exps (EXP)
4270 logs (LOG)
4271 exp2s (EXP2)
4272 log2s (LOG2)
4273 (simplify
4274 (pows REAL_CST@0 @1)
4275 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4276 && real_isfinite (TREE_REAL_CST_PTR (@0))
4277 /* As libmvec doesn't have a vectorized exp2, defer optimizing
4278 the use_exp2 case until after vectorization. It actually seems
4279 beneficial to postpone this until later for all constants,
4280 because exp(log(C)*x), while faster, has worse precision,
4281 and if x also folds to a constant the transformation is an
4282 unnecessary pessimization. */
4283 && canonicalize_math_after_vectorization_p ())
4284 (with {
4285 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4286 bool use_exp2 = false;
4287 if (targetm.libc_has_function (function_c99_misc)
4288 && value->cl == rvc_normal)
4289 {
4290 REAL_VALUE_TYPE frac_rvt = *value;
4291 SET_REAL_EXP (&frac_rvt, 1);
4292 if (real_equal (&frac_rvt, &dconst1))
4293 use_exp2 = true;
4294 }
4295 }
4296 (if (!use_exp2)
4297 (if (optimize_pow_to_exp (@0, @1))
4298 (exps (mult (logs @0) @1)))
4299 (exp2s (mult (log2s @0) @1)))))))
4300 #endif
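/* E.g. pow (2.0, x) becomes exp2 (log2 (2.0) * x), which folds
   further to exp2 (x), while pow (2.5, x) becomes
   exp (log (2.5) * x) when optimize_pow_to_exp allows it.  */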
4301
4302 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
4303 (for pows (POW)
4304 exps (EXP EXP2 EXP10 POW10)
4305 logs (LOG LOG2 LOG10 LOG10)
4306 (simplify
4307 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4308 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4309 && real_isfinite (TREE_REAL_CST_PTR (@0)))
4310 (exps (plus (mult (logs @0) @1) @2)))))
4311
4312 (for sqrts (SQRT)
4313 cbrts (CBRT)
4314 pows (POW)
4315 exps (EXP EXP2 EXP10 POW10)
4316 /* sqrt(expN(x)) -> expN(x*0.5). */
4317 (simplify
4318 (sqrts (exps @0))
4319 (exps (mult @0 { build_real (type, dconsthalf); })))
4320 /* cbrt(expN(x)) -> expN(x/3). */
4321 (simplify
4322 (cbrts (exps @0))
4323 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4324 /* pow(expN(x), y) -> expN(x*y). */
4325 (simplify
4326 (pows (exps @0) @1)
4327 (exps (mult @0 @1))))
4328
4329 /* tan(atan(x)) -> x. */
4330 (for tans (TAN)
4331 atans (ATAN)
4332 (simplify
4333 (tans (atans @0))
4334 @0)))
4335
4336 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
4337 (for sins (SIN)
4338 atans (ATAN)
4339 sqrts (SQRT)
4340 copysigns (COPYSIGN)
4341 (simplify
4342 (sins (atans:s @0))
4343 (with
4344 {
4345 REAL_VALUE_TYPE r_cst;
4346 build_sinatan_real (&r_cst, type);
4347 tree t_cst = build_real (type, r_cst);
4348 tree t_one = build_one_cst (type);
4349 }
4350 (if (SCALAR_FLOAT_TYPE_P (type))
4351 (cond (le (abs @0) { t_cst; })
4352 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
4353 (copysigns { t_one; } @0))))))
4354
4355 /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
4356 (for coss (COS)
4357 atans (ATAN)
4358 sqrts (SQRT)
4359 copysigns (COPYSIGN)
4360 (simplify
4361 (coss (atans:s @0))
4362 (with
4363 {
4364 REAL_VALUE_TYPE r_cst;
4365 build_sinatan_real (&r_cst, type);
4366 tree t_cst = build_real (type, r_cst);
4367 tree t_one = build_one_cst (type);
4368 tree t_zero = build_zero_cst (type);
4369 }
4370 (if (SCALAR_FLOAT_TYPE_P (type))
4371 (cond (le (abs @0) { t_cst; })
4372 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
4373 (copysigns { t_zero; } @0))))))
4374
4375 (if (!flag_errno_math)
4376 /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
4377 (for sinhs (SINH)
4378 atanhs (ATANH)
4379 sqrts (SQRT)
4380 (simplify
4381 (sinhs (atanhs:s @0))
4382 (with { tree t_one = build_one_cst (type); }
4383 (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
4384
4385 /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
4386 (for coshs (COSH)
4387 atanhs (ATANH)
4388 sqrts (SQRT)
4389 (simplify
4390 (coshs (atanhs:s @0))
4391 (with { tree t_one = build_one_cst (type); }
4392 (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
4393
4394 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
4395 (simplify
4396 (CABS (complex:C @0 real_zerop@1))
4397 (abs @0))
4398
4399 /* trunc(trunc(x)) -> trunc(x), etc. */
4400 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4401 (simplify
4402 (fns (fns @0))
4403 (fns @0)))
4404 /* f(x) -> x if x is integer valued and f does nothing for such values. */
4405 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4406 (simplify
4407 (fns integer_valued_real_p@0)
4408 @0))
4409
4410 /* hypot(x,0) and hypot(0,x) -> abs(x). */
4411 (simplify
4412 (HYPOT:c @0 real_zerop@1)
4413 (abs @0))
4414
4415 /* pow(1,x) -> 1. */
4416 (simplify
4417 (POW real_onep@0 @1)
4418 @0)
4419
4420 (simplify
4421 /* copysign(x,x) -> x. */
4422 (COPYSIGN_ALL @0 @0)
4423 @0)
4424
4425 (simplify
4426 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
4427 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
4428 (abs @0))
4429
4430 (for scale (LDEXP SCALBN SCALBLN)
4431 /* ldexp(0, x) -> 0. */
4432 (simplify
4433 (scale real_zerop@0 @1)
4434 @0)
4435 /* ldexp(x, 0) -> x. */
4436 (simplify
4437 (scale @0 integer_zerop@1)
4438 @0)
4439 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
4440 (simplify
4441 (scale REAL_CST@0 @1)
4442 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
4443 @0)))
4444
4445 /* Canonicalization of sequences of math builtins. These rules represent
4446 IL simplifications but are not necessarily optimizations.
4447
4448 The sincos pass is responsible for picking "optimal" implementations
4449 of math builtins, which may be more complicated and can sometimes go
4450 the other way, e.g. converting pow into a sequence of sqrts.
4451 We only want to do these canonicalizations before the pass has run. */
4452
4453 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4454 /* Simplify tan(x) * cos(x) -> sin(x). */
4455 (simplify
4456 (mult:c (TAN:s @0) (COS:s @0))
4457 (SIN @0))
4458
4459 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4460 (simplify
4461 (mult:c @0 (POW:s @0 REAL_CST@1))
4462 (if (!TREE_OVERFLOW (@1))
4463 (POW @0 (plus @1 { build_one_cst (type); }))))
4464
4465 /* Simplify sin(x) / cos(x) -> tan(x). */
4466 (simplify
4467 (rdiv (SIN:s @0) (COS:s @0))
4468 (TAN @0))
4469
4470 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4471 (simplify
4472 (rdiv (COS:s @0) (SIN:s @0))
4473 (rdiv { build_one_cst (type); } (TAN @0)))
4474
4475 /* Simplify sin(x) / tan(x) -> cos(x). */
4476 (simplify
4477 (rdiv (SIN:s @0) (TAN:s @0))
4478 (if (! HONOR_NANS (@0)
4479 && ! HONOR_INFINITIES (@0))
4480 (COS @0)))
4481
4482 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4483 (simplify
4484 (rdiv (TAN:s @0) (SIN:s @0))
4485 (if (! HONOR_NANS (@0)
4486 && ! HONOR_INFINITIES (@0))
4487 (rdiv { build_one_cst (type); } (COS @0))))
4488
4489 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4490 (simplify
4491 (mult (POW:s @0 @1) (POW:s @0 @2))
4492 (POW @0 (plus @1 @2)))
4493
4494 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4495 (simplify
4496 (mult (POW:s @0 @1) (POW:s @2 @1))
4497 (POW (mult @0 @2) @1))
4498
4499 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4500 (simplify
4501 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4502 (POWI (mult @0 @2) @1))
4503
4504 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4505 (simplify
4506 (rdiv (POW:s @0 REAL_CST@1) @0)
4507 (if (!TREE_OVERFLOW (@1))
4508 (POW @0 (minus @1 { build_one_cst (type); }))))
4509
4510 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4511 (simplify
4512 (rdiv @0 (POW:s @1 @2))
4513 (mult @0 (POW @1 (negate @2))))
4514
4515 (for sqrts (SQRT)
4516 cbrts (CBRT)
4517 pows (POW)
4518 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4519 (simplify
4520 (sqrts (sqrts @0))
4521 (pows @0 { build_real (type, dconst_quarter ()); }))
4522 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4523 (simplify
4524 (sqrts (cbrts @0))
4525 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4526 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4527 (simplify
4528 (cbrts (sqrts @0))
4529 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4530 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4531 (simplify
4532 (cbrts (cbrts tree_expr_nonnegative_p@0))
4533 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4534 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4535 (simplify
4536 (sqrts (pows @0 @1))
4537 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4538 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4539 (simplify
4540 (cbrts (pows tree_expr_nonnegative_p@0 @1))
4541 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4542 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4543 (simplify
4544 (pows (sqrts @0) @1)
4545 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4546 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4547 (simplify
4548 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4549 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4550 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4551 (simplify
4552 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4553 (pows @0 (mult @1 @2))))
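/* E.g. sqrt (sqrt (x)) becomes pow (x, 0.25) and pow (sqrt (x), y)
   becomes pow (x, y * 0.5), leaving later passes to choose the
   cheapest expansion.  */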
4554
4555 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4556 (simplify
4557 (CABS (complex @0 @0))
4558 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4559
4560 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4561 (simplify
4562 (HYPOT @0 @0)
4563 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4564
4565 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4566 (for cexps (CEXP)
4567 exps (EXP)
4568 cexpis (CEXPI)
4569 (simplify
4570 (cexps compositional_complex@0)
4571 (if (targetm.libc_has_function (function_c99_math_complex))
4572 (complex
4573 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4574 (mult @1 (imagpart @2)))))))
4575
4576 (if (canonicalize_math_p ())
4577 /* floor(x) -> trunc(x) if x is nonnegative. */
4578 (for floors (FLOOR_ALL)
4579 truncs (TRUNC_ALL)
4580 (simplify
4581 (floors tree_expr_nonnegative_p@0)
4582 (truncs @0))))
4583
4584 (match double_value_p
4585 @0
4586 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4587 (for froms (BUILT_IN_TRUNCL
4588 BUILT_IN_FLOORL
4589 BUILT_IN_CEILL
4590 BUILT_IN_ROUNDL
4591 BUILT_IN_NEARBYINTL
4592 BUILT_IN_RINTL)
4593 tos (BUILT_IN_TRUNC
4594 BUILT_IN_FLOOR
4595 BUILT_IN_CEIL
4596 BUILT_IN_ROUND
4597 BUILT_IN_NEARBYINT
4598 BUILT_IN_RINT)
4599 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4600 (if (optimize && canonicalize_math_p ())
4601 (simplify
4602 (froms (convert double_value_p@0))
4603 (convert (tos @0)))))
4604
4605 (match float_value_p
4606 @0
4607 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4608 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4609 BUILT_IN_FLOORL BUILT_IN_FLOOR
4610 BUILT_IN_CEILL BUILT_IN_CEIL
4611 BUILT_IN_ROUNDL BUILT_IN_ROUND
4612 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4613 BUILT_IN_RINTL BUILT_IN_RINT)
4614 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4615 BUILT_IN_FLOORF BUILT_IN_FLOORF
4616 BUILT_IN_CEILF BUILT_IN_CEILF
4617 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4618 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4619 BUILT_IN_RINTF BUILT_IN_RINTF)
4620 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
4621 if x is a float. */
4622 (if (optimize && canonicalize_math_p ()
4623 && targetm.libc_has_function (function_c99_misc))
4624 (simplify
4625 (froms (convert float_value_p@0))
4626 (convert (tos @0)))))
4627
4628 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
4629 tos (XFLOOR XCEIL XROUND XRINT)
4630 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4631 (if (optimize && canonicalize_math_p ())
4632 (simplify
4633 (froms (convert double_value_p@0))
4634 (tos @0))))
4635
4636 (for froms (XFLOORL XCEILL XROUNDL XRINTL
4637 XFLOOR XCEIL XROUND XRINT)
4638 tos (XFLOORF XCEILF XROUNDF XRINTF)
4639 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
4640 if x is a float. */
4641 (if (optimize && canonicalize_math_p ())
4642 (simplify
4643 (froms (convert float_value_p@0))
4644 (tos @0))))
4645
4646 (if (canonicalize_math_p ())
4647 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4648 (for floors (IFLOOR LFLOOR LLFLOOR)
4649 (simplify
4650 (floors tree_expr_nonnegative_p@0)
4651 (fix_trunc @0))))
4652
4653 (if (canonicalize_math_p ())
4654 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4655 (for fns (IFLOOR LFLOOR LLFLOOR
4656 ICEIL LCEIL LLCEIL
4657 IROUND LROUND LLROUND)
4658 (simplify
4659 (fns integer_valued_real_p@0)
4660 (fix_trunc @0)))
4661 (if (!flag_errno_math)
4662 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4663 (for rints (IRINT LRINT LLRINT)
4664 (simplify
4665 (rints integer_valued_real_p@0)
4666 (fix_trunc @0)))))
4667
4668 (if (canonicalize_math_p ())
4669 (for ifn (IFLOOR ICEIL IROUND IRINT)
4670 lfn (LFLOOR LCEIL LROUND LRINT)
4671 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4672 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4673 sizeof (int) == sizeof (long). */
4674 (if (TYPE_PRECISION (integer_type_node)
4675 == TYPE_PRECISION (long_integer_type_node))
4676 (simplify
4677 (ifn @0)
4678 (lfn:long_integer_type_node @0)))
4679 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4680 sizeof (long long) == sizeof (long). */
4681 (if (TYPE_PRECISION (long_long_integer_type_node)
4682 == TYPE_PRECISION (long_integer_type_node))
4683 (simplify
4684 (llfn @0)
4685 (lfn:long_integer_type_node @0)))))
4686
4687 /* cproj(x) -> x if we're ignoring infinities. */
4688 (simplify
4689 (CPROJ @0)
4690 (if (!HONOR_INFINITIES (type))
4691 @0))
4692
4693 /* If the real part is inf and the imag part is known to be
4694 nonnegative, return (inf + 0i). */
4695 (simplify
4696 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
4697 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
4698 { build_complex_inf (type, false); }))
4699
4700 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
4701 (simplify
4702 (CPROJ (complex @0 REAL_CST@1))
4703 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
4704 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4705
4706 (for pows (POW)
4707 sqrts (SQRT)
4708 cbrts (CBRT)
4709 (simplify
4710 (pows @0 REAL_CST@1)
4711 (with {
4712 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
4713 REAL_VALUE_TYPE tmp;
4714 }
4715 (switch
4716 /* pow(x,0) -> 1. */
4717 (if (real_equal (value, &dconst0))
4718 { build_real (type, dconst1); })
4719 /* pow(x,1) -> x. */
4720 (if (real_equal (value, &dconst1))
4721 @0)
4722 /* pow(x,-1) -> 1/x. */
4723 (if (real_equal (value, &dconstm1))
4724 (rdiv { build_real (type, dconst1); } @0))
4725 /* pow(x,0.5) -> sqrt(x). */
4726 (if (flag_unsafe_math_optimizations
4727 && canonicalize_math_p ()
4728 && real_equal (value, &dconsthalf))
4729 (sqrts @0))
4730 /* pow(x,1/3) -> cbrt(x). */
4731 (if (flag_unsafe_math_optimizations
4732 && canonicalize_math_p ()
4733 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4734 real_equal (value, &tmp)))
4735 (cbrts @0))))))
4736
4737 /* powi(1,x) -> 1. */
4738 (simplify
4739 (POWI real_onep@0 @1)
4740 @0)
4741
4742 (simplify
4743 (POWI @0 INTEGER_CST@1)
4744 (switch
4745 /* powi(x,0) -> 1. */
4746 (if (wi::to_wide (@1) == 0)
4747 { build_real (type, dconst1); })
4748 /* powi(x,1) -> x. */
4749 (if (wi::to_wide (@1) == 1)
4750 @0)
4751 /* powi(x,-1) -> 1/x. */
4752 (if (wi::to_wide (@1) == -1)
4753 (rdiv { build_real (type, dconst1); } @0))))
4754
4755 /* Narrowing of arithmetic and logical operations.
4756
4757 These are conceptually similar to the transformations performed for
4758 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4759 term we want to move all that code out of the front-ends into this file. */
4760
4761 /* If we have a narrowing conversion of an arithmetic operation where
4762 both operands are widening conversions from the same type as the outer
4763 narrowing conversion, then convert the innermost operands to a suitable
4764 unsigned type (to avoid introducing undefined behavior), perform the
4765 operation and convert the result to the desired type. */
4766 (for op (plus minus)
4767 (simplify
4768 (convert (op:s (convert@2 @0) (convert?@3 @1)))
4769 (if (INTEGRAL_TYPE_P (type)
4770 /* We check for type compatibility between @0 and @1 below,
4771 so there's no need to check that @1/@3 are integral types. */
4772 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4773 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4774 /* The precision of the type of each operand must match the
4775 precision of the mode of each operand, similarly for the
4776 result. */
4777 && type_has_mode_precision_p (TREE_TYPE (@0))
4778 && type_has_mode_precision_p (TREE_TYPE (@1))
4779 && type_has_mode_precision_p (type)
4780 /* The inner conversion must be a widening conversion. */
4781 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4782 && types_match (@0, type)
4783 && (types_match (@0, @1)
4784 /* Or the second operand is const integer or converted const
4785 integer from valueize. */
4786 || TREE_CODE (@1) == INTEGER_CST))
4787 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4788 (op @0 (convert @1))
4789 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4790 (convert (op (convert:utype @0)
4791 (convert:utype @1))))))))
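
/* A sketch of the effect at the source level (assuming 16-bit short
   and 32-bit int): the C front end widens the operands of

     short f (short a, short b) { return a + b; }

   to int, giving (short) ((int) a + (int) b). Since a direct short
   addition could overflow, the fold instead produces

     (short) ((unsigned short) a + (unsigned short) b)

   where the addition is performed in unsigned short and wraps modulo
   2**16, introducing no undefined behavior. */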

/* This is another case of narrowing, specifically when there's an outer
   BIT_AND_EXPR which masks off bits outside the type of the innermost
   operands. Like the previous case we have to convert the operands
   to unsigned types to avoid introducing undefined behavior for the
   arithmetic operation. */
(for op (minus plus)
 (simplify
  (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
  (if (INTEGRAL_TYPE_P (type)
       /* We check for type compatibility between @0 and @1 below,
          so there's no need to check that @1/@3 are integral types. */
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@2))
       /* The precision of the type of each operand must match the
          precision of the mode of each operand, similarly for the
          result. */
       && type_has_mode_precision_p (TREE_TYPE (@0))
       && type_has_mode_precision_p (TREE_TYPE (@1))
       && type_has_mode_precision_p (type)
       /* The inner conversion must be a widening conversion. */
       && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
       && types_match (@0, @1)
       && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
           <= TYPE_PRECISION (TREE_TYPE (@0)))
       && (wi::to_wide (@4)
           & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
                       true, TYPE_PRECISION (type))) == 0)
   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
    (with { tree ntype = TREE_TYPE (@0); }
     (convert (bit_and (op @0 @1) (convert:ntype @4))))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (convert (bit_and (op (convert:utype @0) (convert:utype @1))
                       (convert:utype @4))))))))
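
/* Sketch of the masking variant (hypothetical source, with unsigned
   char promoted to 32-bit int by the C front end):

     int f (unsigned char a, unsigned char b) { return (a + b) & 0x7f; }

   The mask 0x7f fits in the 8-bit inner type and clears every higher
   bit, so the fold computes (a + b) & 0x7f entirely in unsigned char
   and widens the result once at the end. */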

/* Transform (@0 < @1 and @0 < @2) to use min,
   (@0 > @1 and @0 > @2) to use max. The bit_ior rows use the
   opposite extremum: (@0 < @1 or @0 < @2) is (@0 < max (@1, @2)). */
(for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
     op    (lt      le      gt      ge      lt      le      gt      ge     )
     ext   (min     min     max     max     max     max     min     min    )
 (simplify
  (logic (op:cs @0 @1) (op:cs @0 @2))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TREE_CODE (@0) != INTEGER_CST)
   (op @0 (ext @1 @2)))))
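
/* Illustratively:

     x < y && x < z   ->   x < MIN (y, z)
     x < y || x < z   ->   x < MAX (y, z)

   replacing two comparisons and a logical operation with one min/max
   and one comparison (x must not be a constant). */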

(simplify
 /* signbit(x) -> 0 if x is nonnegative. */
 (SIGNBIT tree_expr_nonnegative_p@0)
 { integer_zero_node; })

(simplify
 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
 (SIGNBIT @0)
 (if (!HONOR_SIGNED_ZEROS (@0))
  (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))

/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
(for cmp (eq ne)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
        && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
        && !TYPE_SATURATING (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res)
          && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
      { constant_boolean_node (cmp == NE_EXPR, type); }
      (if (single_use (@3))
       (cmp @0 { TREE_OVERFLOW (res)
                 ? drop_tree_overflow (res) : res; }))))))))
(for cmp (lt le gt ge)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      {
        fold_overflow_warning (("assuming signed overflow does not occur "
                                "when simplifying conditional to constant"),
                               WARN_STRICT_OVERFLOW_CONDITIONAL);
        bool less = cmp == LE_EXPR || cmp == LT_EXPR;
        /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
        bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
                                  TYPE_SIGN (TREE_TYPE (@1)))
                        != (op == MINUS_EXPR);
        constant_boolean_node (less == ovf_high, type);
      }
      (if (single_use (@3))
       (with
        {
          fold_overflow_warning (("assuming signed overflow does not occur "
                                  "when changing X +- C1 cmp C2 to "
                                  "X cmp C2 -+ C1"),
                                 WARN_STRICT_OVERFLOW_COMPARISON);
        }
        (cmp @0 { res; })))))))))
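
/* For example (signed int, overflow undefined):

     x + 10 == 20   ->   x == 10
     x - 5 > 100    ->   x > 105

   When C2 -+ C1 itself overflows, as in x + 10 < INT_MIN + 5, the
   comparison folds to a constant instead (here false: assuming x + 10
   does not overflow, the sum is at least INT_MIN + 10). */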

/* Canonicalizations of BIT_FIELD_REFs. */

(simplify
 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))

(simplify
 (BIT_FIELD_REF (view_convert @0) @1 @2)
 (BIT_FIELD_REF @0 @1 @2))

(simplify
 (BIT_FIELD_REF @0 @1 integer_zerop)
 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
  (view_convert @0)))

(simplify
 (BIT_FIELD_REF @0 @1 @2)
 (switch
  (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
       && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
   (switch
    (if (integer_zerop (@2))
     (view_convert (realpart @0)))
    (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
     (view_convert (imagpart @0)))))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (type)
       /* On GIMPLE this should only apply to register arguments. */
       && (! GIMPLE || is_gimple_reg (@0))
       /* A bit-field-ref that references the full argument can be stripped. */
       && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
            && integer_zerop (@2))
           /* Low-parts can be reduced to integral conversions.
              ??? The following doesn't work for PDP endian. */
           || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
               /* Don't even think about BITS_BIG_ENDIAN. */
               && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
               && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
               && compare_tree_int (@2, (BYTES_BIG_ENDIAN
                                         ? (TYPE_PRECISION (TREE_TYPE (@0))
                                            - TYPE_PRECISION (type))
                                         : 0)) == 0)))
   (convert @0))))
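
/* Example of the low-part case (illustrative): on a little-endian
   target with 64-bit long and 32-bit int,

     BIT_FIELD_REF <x, 32, 0>

   extracting the low 32 bits of a long x is exactly (int) x, so it
   folds to the plain conversion; on big-endian the low part lives at
   bit position 32, as the BYTES_BIG_ENDIAN adjustment above encodes. */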

/* Simplify vector extracts. */

(simplify
 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
      && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
          || (VECTOR_TYPE_P (type)
              && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
  (with
   {
     tree ctor = (TREE_CODE (@0) == SSA_NAME
                  ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
     tree eltype = TREE_TYPE (TREE_TYPE (ctor));
     unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
     unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
     unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
   }
   (if (n != 0
        && (idx % width) == 0
        && (n % width) == 0
        && known_le ((idx + n) / width,
                     TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
    (with
     {
       idx = idx / width;
       n = n / width;
       /* Constructor elements can be subvectors. */
       poly_uint64 k = 1;
       if (CONSTRUCTOR_NELTS (ctor) != 0)
         {
           tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
           if (TREE_CODE (cons_elem) == VECTOR_TYPE)
             k = TYPE_VECTOR_SUBPARTS (cons_elem);
         }
       unsigned HOST_WIDE_INT elt, count, const_k;
     }
     (switch
      /* We keep an exact subset of the constructor elements. */
      (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
       (if (CONSTRUCTOR_NELTS (ctor) == 0)
        { build_constructor (type, NULL); }
        (if (count == 1)
         (if (elt < CONSTRUCTOR_NELTS (ctor))
          (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
          { build_zero_cst (type); })
         {
           vec<constructor_elt, va_gc> *vals;
           vec_alloc (vals, count);
           for (unsigned i = 0;
                i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
             CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
                                     CONSTRUCTOR_ELT (ctor, elt + i)->value);
           build_constructor (type, vals);
         })))
      /* The bitfield references a single constructor element. */
      (if (k.is_constant (&const_k)
           && idx + n <= (idx / const_k + 1) * const_k)
       (switch
        (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
         { build_zero_cst (type); })
        (if (n == const_k)
         (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
        (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
                       @1 { bitsize_int ((idx % const_k) * width); })))))))))

/* Simplify a bit extraction from a bit insertion for the cases with
   the inserted element fully covering the extraction or the insertion
   not touching the extraction. */
(simplify
 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
 (with
  {
    unsigned HOST_WIDE_INT isize;
    if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
      isize = TYPE_PRECISION (TREE_TYPE (@1));
    else
      isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
  }
  (switch
   (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
        && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
                      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
                                                 wi::to_wide (@rpos)
                                                 - wi::to_wide (@ipos)); }))
   (if (wi::geu_p (wi::to_wide (@ipos),
                   wi::to_wide (@rpos) + wi::to_wide (@rsize))
        || wi::geu_p (wi::to_wide (@rpos),
                      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @0 @rsize @rpos)))))
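
/* The two cases above, illustratively: if v = bit_insert (u, e, 64)
   inserts a 64-bit value e at bit 64, then

     BIT_FIELD_REF <v, 32, 64>  ->  BIT_FIELD_REF <e, 32, 0>
     BIT_FIELD_REF <v, 32, 0>   ->  BIT_FIELD_REF <u, 32, 0>

   The first extraction lies entirely within the inserted bits, the
   second does not touch them; partial overlaps are left alone. */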

(if (canonicalize_math_after_vectorization_p ())
 (for fmas (FMA)
  (simplify
   (fmas:c (negate @0) @1 @2)
   (IFN_FNMA @0 @1 @2))
  (simplify
   (fmas @0 @1 (negate @2))
   (IFN_FMS @0 @1 @2))
  (simplify
   (fmas:c (negate @0) @1 (negate @2))
   (IFN_FNMS @0 @1 @2))
  (simplify
   (negate (fmas@3 @0 @1 @2))
   (if (single_use (@3))
    (IFN_FNMS @0 @1 @2))))

 (simplify
  (IFN_FMS:c (negate @0) @1 @2)
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FMS @0 @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FMS:c (negate @0) @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (negate (IFN_FMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FNMA @0 @1 @2)))

 (simplify
  (IFN_FNMA:c (negate @0) @1 @2)
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FNMA @0 @1 (negate @2))
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FNMA:c (negate @0) @1 (negate @2))
  (IFN_FMS @0 @1 @2))
 (simplify
  (negate (IFN_FNMA@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMS @0 @1 @2)))

 (simplify
  (IFN_FNMS:c (negate @0) @1 @2)
  (IFN_FMS @0 @1 @2))
 (simplify
  (IFN_FNMS @0 @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (IFN_FNMS:c (negate @0) @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (negate (IFN_FNMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMA @0 @1 @2))))
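
/* Summary of the sign identities used above (illustrative):

     FMA  (a, b, c) =  a * b + c
     FMS  (a, b, c) =  a * b - c
     FNMA (a, b, c) = -a * b + c
     FNMS (a, b, c) = -a * b - c

   so e.g. FMA (-a, b, c) is FNMA (a, b, c) and -FMA (a, b, c) is
   FNMS (a, b, c); a negation of an input, or of a single-use result,
   is always absorbed into one of the four fused forms. */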

/* POPCOUNT simplifications. */
(for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
               BUILT_IN_POPCOUNTIMAX)
 /* popcount(X&1) is nop_expr(X&1). */
 (simplify
  (popcount @0)
  (if (tree_nonzero_bits (@0) == 1)
   (convert @0)))
 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
 (simplify
  (plus (popcount:s @0) (popcount:s @1))
  (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
   (popcount (bit_ior @0 @1))))
 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
 (for cmp (le eq ne gt)
      rep (eq eq ne ne)
  (simplify
   (cmp (popcount @0) integer_zerop)
   (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
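
/* Illustrative instances:

     __builtin_popcount (x & 1)    ->  (int) (x & 1)
     __builtin_popcount (a)
       + __builtin_popcount (b)    ->  __builtin_popcount (a | b)
                                       when a & b is provably zero
     __builtin_popcount (x) == 0   ->  x == 0
     __builtin_popcount (x) > 0    ->  x != 0

   The first two rely on tree_nonzero_bits proving which bits can be set. */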

/* Simplify:

     a = a1 op a2
     r = c ? a : b;

   to:

     r = c ? a1 op a2 : b;

   if the target can do it in one go. This makes the operation conditional
   on c, so it could drop potentially-trapping arithmetic; that is a valid
   simplification because the result of the operation isn't needed when c
   is false. */
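
/* For example (sketch): with a vector condition c, the GIMPLE

     a = a1 + a2;
     r = VEC_COND_EXPR <c, a, b>;

   becomes the conditional internal-function call

     r = .COND_ADD (c, a1, a2, b);

   in which lanes where c is false simply take the "else" value b. */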
(for uncond_op (UNCOND_BINARY)
     cond_op (COND_BINARY)
 (simplify
  (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
  (with { tree op_type = TREE_TYPE (@4); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))

/* Same for ternary operations. */
(for uncond_op (UNCOND_TERNARY)
     cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@5); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 @4
                   (view_convert:op_type @1)))))))

/* Detect cases in which a VEC_COND_EXPR effectively replaces the
   "else" value of an IFN_COND_*. */
(for cond_op (COND_BINARY)
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@3); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (inverse_conditions_p (@0, @2)
        && element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))

/* Same for ternary operations. */
(for cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
  (with { tree op_type = TREE_TYPE (@6); }
   (if (inverse_conditions_p (@0, @2)
        && element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))

/* For pointers @0 and @2 and nonnegative constant offset @1, look for
   expressions like:

     A: (@0 + @1 < @2) | (@2 + @1 < @0)
     B: (@0 + @1 <= @2) | (@2 + @1 <= @0)

   If pointers are known not to wrap, B checks whether @1 bytes starting
   at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
   bytes. A is more efficiently tested as:

     A: (sizetype) (@0 + @1 - @2) > @1 * 2

   The equivalent expression for B is given by replacing @1 with @1 - 1:

     B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2

   @0 and @2 can be swapped in both expressions without changing the result.

   The folds rely on sizetype's being unsigned (which is always true)
   and on its being the same width as the pointer (which we have to check).

   The fold replaces two pointer_plus expressions, two comparisons and
   an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
   the best case it's a saving of two operations. The A fold retains one
   of the original pointer_pluses, so is a win even if both pointer_pluses
   are used elsewhere. The B fold is a wash if both pointer_pluses are
   used elsewhere, since all we end up doing is replacing a comparison with
   a pointer_plus. We do still apply the fold under those circumstances
   though, in case applying it to other conditions eventually makes one of the
   pointer_pluses dead. */
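
/* A worked instance of form A with @1 == 16 (illustrative):

     (p + 16 < q) | (q + 16 < p)   ->   (sizetype) (p + 16 - q) > 32

   If the two 17-byte regions overlap, p + 16 - q lies in [0, 32] and
   the test is false; any disjoint placement either exceeds 32 directly
   or wraps the unsigned difference far past it. Form B replaces 16
   and 32 by 15 and 30. */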
(for ior (truth_orif truth_or bit_ior)
 (for cmp (le lt)
  (simplify
   (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
        (cmp:cs (pointer_plus@4 @2 @1) @0))
   (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
        && TYPE_OVERFLOW_WRAPS (sizetype)
        && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
    /* Calculate the rhs constant. */
    (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
            offset_int rhs = off * 2; }
     /* Always fails for negative values. */
     (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
      /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
         pick a canonical order. This increases the chances of using the
         same pointer_plus in multiple checks. */
      (with { bool swap_p = tree_swap_operands_p (@0, @2);
              tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
       (if (cmp == LT_EXPR)
        (gt (convert:sizetype
             (pointer_diff:ssizetype { swap_p ? @4 : @3; }
                                     { swap_p ? @0 : @2; }))
            { rhs_tree; })
        (gt (convert:sizetype
             (pointer_diff:ssizetype
              (pointer_plus { swap_p ? @2 : @0; }
                            { wide_int_to_tree (sizetype, off); })
              { swap_p ? @0 : @2; }))
            { rhs_tree; })))))))))