1 /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.cc
3 and generic-match.cc from it.
4
5 Copyright (C) 2014-2022 Free Software Foundation, Inc.
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25
26 /* Generic tree predicates we inherit. */
27 (define_predicates
28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
29 integer_each_onep integer_truep integer_nonzerop
30 real_zerop real_onep real_minus_onep
31 zerop
32 initializer_each_zero_or_onep
33 CONSTANT_CLASS_P
34 tree_expr_nonnegative_p
35 tree_expr_nonzero_p
36 integer_valued_real_p
37 integer_pow2p
38 uniform_integer_cst_p
39 HONOR_NANS
40 uniform_vector_p
41 expand_vec_cmp_expr_p
42 bitmask_inv_cst_vector_p)
43
44 /* Operator lists. */
45 (define_operator_list tcc_comparison
46 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
47 (define_operator_list inverted_tcc_comparison
48 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
49 (define_operator_list inverted_tcc_comparison_with_nans
50 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
51 (define_operator_list swapped_tcc_comparison
52 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
53 (define_operator_list simple_comparison lt le eq ne ge gt)
54 (define_operator_list swapped_simple_comparison gt ge eq ne le lt)
55
56 #include "cfn-operators.pd"
57
58 /* Define operand lists for math rounding functions {,i,l,ll}FN,
59 where the versions prefixed with "i" return an int, those prefixed with
60 "l" return a long and those prefixed with "ll" return a long long.
61
62 Also define operand lists:
63
64 X<FN>F for all float functions, in the order i, l, ll
65 X<FN> for all double functions, in the same order
66 X<FN>L for all long double functions, in the same order. */
67 #define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
68 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
69 BUILT_IN_L##FN##F \
70 BUILT_IN_LL##FN##F) \
71 (define_operator_list X##FN BUILT_IN_I##FN \
72 BUILT_IN_L##FN \
73 BUILT_IN_LL##FN) \
74 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
75 BUILT_IN_L##FN##L \
76 BUILT_IN_LL##FN##L)
77
78 DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
79 DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
80 DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
81 DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
82
83 /* Unary operations and their associated IFN_COND_* function. */
84 (define_operator_list UNCOND_UNARY
85 negate)
86 (define_operator_list COND_UNARY
87 IFN_COND_NEG)
88
89 /* Binary operations and their associated IFN_COND_* function. */
90 (define_operator_list UNCOND_BINARY
91 plus minus
92 mult trunc_div trunc_mod rdiv
93 min max
94 IFN_FMIN IFN_FMAX
95 bit_and bit_ior bit_xor
96 lshift rshift)
97 (define_operator_list COND_BINARY
98 IFN_COND_ADD IFN_COND_SUB
99 IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
100 IFN_COND_MIN IFN_COND_MAX
101 IFN_COND_FMIN IFN_COND_FMAX
102 IFN_COND_AND IFN_COND_IOR IFN_COND_XOR
103 IFN_COND_SHL IFN_COND_SHR)
104
105 /* Same for ternary operations. */
106 (define_operator_list UNCOND_TERNARY
107 IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
108 (define_operator_list COND_TERNARY
109 IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
110
111 /* __atomic_fetch_or_*, __atomic_fetch_xor_*, __atomic_xor_fetch_* */
112 (define_operator_list ATOMIC_FETCH_OR_XOR_N
113 BUILT_IN_ATOMIC_FETCH_OR_1 BUILT_IN_ATOMIC_FETCH_OR_2
114 BUILT_IN_ATOMIC_FETCH_OR_4 BUILT_IN_ATOMIC_FETCH_OR_8
115 BUILT_IN_ATOMIC_FETCH_OR_16
116 BUILT_IN_ATOMIC_FETCH_XOR_1 BUILT_IN_ATOMIC_FETCH_XOR_2
117 BUILT_IN_ATOMIC_FETCH_XOR_4 BUILT_IN_ATOMIC_FETCH_XOR_8
118 BUILT_IN_ATOMIC_FETCH_XOR_16
119 BUILT_IN_ATOMIC_XOR_FETCH_1 BUILT_IN_ATOMIC_XOR_FETCH_2
120 BUILT_IN_ATOMIC_XOR_FETCH_4 BUILT_IN_ATOMIC_XOR_FETCH_8
121 BUILT_IN_ATOMIC_XOR_FETCH_16)
122 /* __sync_fetch_and_or_*, __sync_fetch_and_xor_*, __sync_xor_and_fetch_* */
123 (define_operator_list SYNC_FETCH_OR_XOR_N
124 BUILT_IN_SYNC_FETCH_AND_OR_1 BUILT_IN_SYNC_FETCH_AND_OR_2
125 BUILT_IN_SYNC_FETCH_AND_OR_4 BUILT_IN_SYNC_FETCH_AND_OR_8
126 BUILT_IN_SYNC_FETCH_AND_OR_16
127 BUILT_IN_SYNC_FETCH_AND_XOR_1 BUILT_IN_SYNC_FETCH_AND_XOR_2
128 BUILT_IN_SYNC_FETCH_AND_XOR_4 BUILT_IN_SYNC_FETCH_AND_XOR_8
129 BUILT_IN_SYNC_FETCH_AND_XOR_16
130 BUILT_IN_SYNC_XOR_AND_FETCH_1 BUILT_IN_SYNC_XOR_AND_FETCH_2
131 BUILT_IN_SYNC_XOR_AND_FETCH_4 BUILT_IN_SYNC_XOR_AND_FETCH_8
132 BUILT_IN_SYNC_XOR_AND_FETCH_16)
133 /* __atomic_fetch_and_*. */
134 (define_operator_list ATOMIC_FETCH_AND_N
135 BUILT_IN_ATOMIC_FETCH_AND_1 BUILT_IN_ATOMIC_FETCH_AND_2
136 BUILT_IN_ATOMIC_FETCH_AND_4 BUILT_IN_ATOMIC_FETCH_AND_8
137 BUILT_IN_ATOMIC_FETCH_AND_16)
138 /* __sync_fetch_and_and_*. */
139 (define_operator_list SYNC_FETCH_AND_AND_N
140 BUILT_IN_SYNC_FETCH_AND_AND_1 BUILT_IN_SYNC_FETCH_AND_AND_2
141 BUILT_IN_SYNC_FETCH_AND_AND_4 BUILT_IN_SYNC_FETCH_AND_AND_8
142 BUILT_IN_SYNC_FETCH_AND_AND_16)
143
144 /* With nop_convert? combine convert? and view_convert? in one pattern
145 plus conditionalize on tree_nop_conversion_p conversions. */
146 (match (nop_convert @0)
147 (convert @0)
148 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
149 (match (nop_convert @0)
150 (view_convert @0)
151 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
152 && known_eq (TYPE_VECTOR_SUBPARTS (type),
153 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
154 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
155
156 /* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
157 ABSU_EXPR returns the unsigned absolute value of the operand, and the operand
158 of the ABSU_EXPR will have the corresponding signed type. */
159 (simplify (abs (convert @0))
160 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
161 && !TYPE_UNSIGNED (TREE_TYPE (@0))
162 && element_precision (type) > element_precision (TREE_TYPE (@0)))
163 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
164 (convert (absu:utype @0)))))
165
166 #if GIMPLE
167 /* Optimize (X + (X >> (prec - 1))) ^ (X >> (prec - 1)) into abs (X). */
168 (simplify
169 (bit_xor:c (plus:c @0 (rshift@2 @0 INTEGER_CST@1)) @2)
170 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
171 && !TYPE_UNSIGNED (TREE_TYPE (@0))
172 && wi::to_widest (@1) == element_precision (TREE_TYPE (@0)) - 1)
173 (abs @0)))
174 #endif
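/* As a purely illustrative sketch (not itself a pattern): for a 32-bit int
   the branchless absolute-value idiom recognized above is typically written

     int m = x >> 31;       // prec - 1 == 31, so m is 0 or -1
     int r = (x + m) ^ m;   // equals abs (x) for every x except INT_MIN

   and the simplification rewrites it back into a plain ABS_EXPR.  */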
175
176 /* Simplifications of operations with one constant operand and
177 simplifications to constants or single values. */
178
179 (for op (plus pointer_plus minus bit_ior bit_xor)
180 (simplify
181 (op @0 integer_zerop)
182 (non_lvalue @0)))
183
184 /* 0 +p index -> (type)index */
185 (simplify
186 (pointer_plus integer_zerop @1)
187 (non_lvalue (convert @1)))
188
189 /* ptr - 0 -> (type)ptr */
190 (simplify
191 (pointer_diff @0 integer_zerop)
192 (convert @0))
193
194 /* See if ARG1 is zero and X + ARG1 reduces to X.
195 Likewise if the operands are reversed. */
196 (simplify
197 (plus:c @0 real_zerop@1)
198 (if (fold_real_zero_addition_p (type, @0, @1, 0))
199 (non_lvalue @0)))
200
201 /* See if ARG1 is zero and X - ARG1 reduces to X. */
202 (simplify
203 (minus @0 real_zerop@1)
204 (if (fold_real_zero_addition_p (type, @0, @1, 1))
205 (non_lvalue @0)))
206
207 /* Even if the fold_real_zero_addition_p can't simplify X + 0.0
208 into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
209 or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
210 if not -frounding-math. For sNaNs the first operation would raise
211 exceptions but turn the result into a qNaN, so the second operation
212 would not raise it. */
213 (for inner_op (plus minus)
214 (for outer_op (plus minus)
215 (simplify
216 (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
217 (if (real_zerop (@1)
218 && real_zerop (@2)
219 && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
220 (with { bool inner_plus = ((inner_op == PLUS_EXPR)
221 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
222 bool outer_plus
223 = ((outer_op == PLUS_EXPR)
224 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
225 (if (outer_plus && !inner_plus)
226 (outer_op @0 @2)
227 @3))))))
228
229 /* Simplify x - x.
230 This is unsafe for certain floats even in non-IEEE formats.
231 In IEEE, it is unsafe because it gives the wrong result for NaNs.
232 PR middle-end/98420: x - x may be -0.0 with FE_DOWNWARD.
233 Also note that operand_equal_p is always false if an operand
234 is volatile. */
235 (simplify
236 (minus @0 @0)
237 (if (!FLOAT_TYPE_P (type)
238 || (!tree_expr_maybe_nan_p (@0)
239 && !tree_expr_maybe_infinite_p (@0)
240 && (!HONOR_SIGN_DEPENDENT_ROUNDING (type)
241 || !HONOR_SIGNED_ZEROS (type))))
242 { build_zero_cst (type); }))
243 (simplify
244 (pointer_diff @@0 @0)
245 { build_zero_cst (type); })
246
247 (simplify
248 (mult @0 integer_zerop@1)
249 @1)
250
251 /* -x == x -> x == 0 */
252 (for cmp (eq ne)
253 (simplify
254 (cmp:c @0 (negate @0))
255 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
256 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE(@0)))
257 (cmp @0 { build_zero_cst (TREE_TYPE(@0)); }))))
258
259 /* Maybe fold x * 0 to 0. The expressions aren't the same
260 when x is NaN, since x * 0 is also NaN. Nor are they the
261 same in modes with signed zeros, since multiplying a
262 negative value by 0 gives -0, not +0. Nor when x is +-Inf,
263 since x * 0 is NaN. */
264 (simplify
265 (mult @0 real_zerop@1)
266 (if (!tree_expr_maybe_nan_p (@0)
267 && (!HONOR_NANS (type) || !tree_expr_maybe_infinite_p (@0))
268 && (!HONOR_SIGNED_ZEROS (type) || tree_expr_nonnegative_p (@0)))
269 @1))
270
271 /* In IEEE floating point, x*1 is not equivalent to x for snans.
272 Likewise for complex arithmetic with signed zeros. */
273 (simplify
274 (mult @0 real_onep)
275 (if (!tree_expr_maybe_signaling_nan_p (@0)
276 && (!HONOR_SIGNED_ZEROS (type)
277 || !COMPLEX_FLOAT_TYPE_P (type)))
278 (non_lvalue @0)))
279
280 /* Transform x * -1.0 into -x. */
281 (simplify
282 (mult @0 real_minus_onep)
283 (if (!tree_expr_maybe_signaling_nan_p (@0)
284 && (!HONOR_SIGNED_ZEROS (type)
285 || !COMPLEX_FLOAT_TYPE_P (type)))
286 (negate @0)))
287
288 /* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...},
289 unless the target has native support for the former but not the latter. */
290 (simplify
291 (mult @0 VECTOR_CST@1)
292 (if (initializer_each_zero_or_onep (@1)
293 && !HONOR_SNANS (type)
294 && !HONOR_SIGNED_ZEROS (type))
295 (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
296 (if (itype
297 && (!VECTOR_MODE_P (TYPE_MODE (type))
298 || (VECTOR_MODE_P (TYPE_MODE (itype))
299 && optab_handler (and_optab,
300 TYPE_MODE (itype)) != CODE_FOR_nothing)))
301 (view_convert (bit_and:itype (view_convert @0)
302 (ne @1 { build_zero_cst (type); })))))))
303
304 (for cmp (gt ge lt le)
305 outp (convert convert negate negate)
306 outn (negate negate convert convert)
307 /* Transform X * (X > 0.0 ? 1.0 : -1.0) into abs(X). */
308 /* Transform X * (X >= 0.0 ? 1.0 : -1.0) into abs(X). */
309 /* Transform X * (X < 0.0 ? 1.0 : -1.0) into -abs(X). */
310 /* Transform X * (X <= 0.0 ? 1.0 : -1.0) into -abs(X). */
311 (simplify
312 (mult:c @0 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep))
313 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
314 (outp (abs @0))))
315 /* Transform X * (X > 0.0 ? -1.0 : 1.0) into -abs(X). */
316 /* Transform X * (X >= 0.0 ? -1.0 : 1.0) into -abs(X). */
317 /* Transform X * (X < 0.0 ? -1.0 : 1.0) into abs(X). */
318 /* Transform X * (X <= 0.0 ? -1.0 : 1.0) into abs(X). */
319 (simplify
320 (mult:c @0 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1))
321 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
322 (outn (abs @0)))))
323
324 /* Transform X * copysign (1.0, X) into abs(X). */
325 (simplify
326 (mult:c @0 (COPYSIGN_ALL real_onep @0))
327 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
328 (abs @0)))
329
330 /* Transform X * copysign (1.0, -X) into -abs(X). */
331 (simplify
332 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
333 (if (!tree_expr_maybe_nan_p (@0) && !HONOR_SIGNED_ZEROS (type))
334 (negate (abs @0))))
335
336 /* Transform copysign (CST, X) into copysign (ABS(CST), X). */
337 (simplify
338 (COPYSIGN_ALL REAL_CST@0 @1)
339 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
340 (COPYSIGN_ALL (negate @0) @1)))
341
342 /* (x >= 0 ? x : 0) + (x <= 0 ? -x : 0) -> abs x. */
343 (simplify
344 (plus:c (max @0 integer_zerop) (max (negate @0) integer_zerop))
345 (abs @0))
346
347 /* X * 1, X / 1 -> X. */
348 (for op (mult trunc_div ceil_div floor_div round_div exact_div)
349 (simplify
350 (op @0 integer_onep)
351 (non_lvalue @0)))
352
353 /* (A / (1 << B)) -> (A >> B).
354 Only for unsigned A. For signed A, this would not preserve rounding
355 toward zero.
356 For example: (-1 / ( 1 << B)) != -1 >> B.
357 Also handle widening conversions, like:
358 (A / (unsigned long long) (1U << B)) -> (A >> B)
359 or
360 (A / (unsigned long long) (1 << B)) -> (A >> B).
361 If the left shift is signed, it can be done only if the upper bits
362 of A starting from shift's type sign bit are zero, as
363 (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
364 so it is valid only if A >> 31 is zero. */
365 (simplify
366 (trunc_div (convert?@0 @3) (convert2? (lshift integer_onep@1 @2)))
367 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
368 && (!VECTOR_TYPE_P (type)
369 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
370 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
371 && (useless_type_conversion_p (type, TREE_TYPE (@1))
372 || (element_precision (type) >= element_precision (TREE_TYPE (@1))
373 && (TYPE_UNSIGNED (TREE_TYPE (@1))
374 || (element_precision (type)
375 == element_precision (TREE_TYPE (@1)))
376 || (INTEGRAL_TYPE_P (type)
377 && (tree_nonzero_bits (@0)
378 & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
379 true,
380 element_precision (type))) == 0)))))
381 (if (!VECTOR_TYPE_P (type)
382 && useless_type_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))
383 && element_precision (TREE_TYPE (@3)) < element_precision (type))
384 (convert (rshift @3 @2))
385 (rshift @0 @2))))
386
387 /* Preserve explicit divisions by 0: the C++ front-end wants to detect
388 undefined behavior in constexpr evaluation, and assuming that the division
389 traps enables better optimizations than these anyway. */
390 (for div (trunc_div ceil_div floor_div round_div exact_div)
391 /* 0 / X is always zero. */
392 (simplify
393 (div integer_zerop@0 @1)
394 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
395 (if (!integer_zerop (@1))
396 @0))
397 /* X / -1 is -X. */
398 (simplify
399 (div @0 integer_minus_onep@1)
400 (if (!TYPE_UNSIGNED (type))
401 (negate @0)))
402 /* X / bool_range_Y is X. */
403 (simplify
404 (div @0 SSA_NAME@1)
405 (if (INTEGRAL_TYPE_P (type)
406 && ssa_name_has_boolean_range (@1)
407 && !flag_non_call_exceptions)
408 @0))
409 /* X / X is one. */
410 (simplify
411 (div @0 @0)
412 /* But not for 0 / 0 so that we can get the proper warnings and errors.
413 And not for _Fract types where we can't build 1. */
414 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type))
415 && !integer_zerop (@0)
416 && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0)))
417 { build_one_cst (type); }))
418 /* X / abs (X) is X < 0 ? -1 : 1. */
419 (simplify
420 (div:C @0 (abs @0))
421 (if (INTEGRAL_TYPE_P (type)
422 && TYPE_OVERFLOW_UNDEFINED (type)
423 && !integer_zerop (@0)
424 && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0)))
425 (cond (lt @0 { build_zero_cst (type); })
426 { build_minus_one_cst (type); } { build_one_cst (type); })))
427 /* X / -X is -1. */
428 (simplify
429 (div:C @0 (negate @0))
430 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
431 && TYPE_OVERFLOW_UNDEFINED (type)
432 && !integer_zerop (@0)
433 && (!flag_non_call_exceptions || tree_expr_nonzero_p (@0)))
434 { build_minus_one_cst (type); })))
435
436 /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
437 TRUNC_DIV_EXPR. Rewrite into the latter in this case. Similarly
438 for MOD instead of DIV. */
439 (for floor_divmod (floor_div floor_mod)
440 trunc_divmod (trunc_div trunc_mod)
441 (simplify
442 (floor_divmod @0 @1)
443 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
444 && TYPE_UNSIGNED (type))
445 (trunc_divmod @0 @1))))
446
447 /* 1 / X -> X == 1 for unsigned integer X.
448 1 / X -> X >= -1 && X <= 1 ? X : 0 for signed integer X.
449 But not for 1 / 0 so that we can get proper warnings and errors,
450 and not for 1-bit integers as they are edge cases better handled
451 elsewhere. */
452 (simplify
453 (trunc_div integer_onep@0 @1)
454 (if (INTEGRAL_TYPE_P (type)
455 && TYPE_PRECISION (type) > 1
456 && !integer_zerop (@1)
457 && (!flag_non_call_exceptions || tree_expr_nonzero_p (@1)))
458 (if (TYPE_UNSIGNED (type))
459 (convert (eq:boolean_type_node @1 { build_one_cst (type); }))
460 (with { tree utype = unsigned_type_for (type); }
461 (cond (le (plus (convert:utype @1) { build_one_cst (utype); })
462 { build_int_cst (utype, 2); })
463 @1 { build_zero_cst (type); })))))
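/* Illustrative sketch of the signed branch above: the range test
   -1 <= X && X <= 1 is carried out in the unsigned type as

     (unsigned) X + 1 <= 2   // true exactly for X in {-1, 0, 1}

   so 1 / X becomes (X in {-1, 0, 1} ? X : 0).  */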
464
465 /* Combine two successive divisions. Note that combining ceil_div
466 and floor_div is trickier and combining round_div even more so. */
467 (for div (trunc_div exact_div)
468 (simplify
469 (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
470 (with {
471 wi::overflow_type overflow;
472 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
473 TYPE_SIGN (type), &overflow);
474 }
475 (if (div == EXACT_DIV_EXPR
476 || optimize_successive_divisions_p (@2, @3))
477 (if (!overflow)
478 (div @0 { wide_int_to_tree (type, mul); })
479 (if (TYPE_UNSIGNED (type)
480 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
481 { build_zero_cst (type); }))))))
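/* A small worked example (illustrative): (x / 3) / 5 combines into x / 15
   when 3 * 5 does not overflow the type.  If the product does overflow an
   unsigned type, x / C1 is already smaller than C2, so the whole expression
   folds to zero instead.  */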
482
483 /* Combine successive multiplications. Similar to above, but handling
484 overflow is different. */
485 (simplify
486 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
487 (with {
488 wi::overflow_type overflow;
489 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
490 TYPE_SIGN (type), &overflow);
491 }
492 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
493 otherwise undefined overflow implies that @0 must be zero. */
494 (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
495 (mult @0 { wide_int_to_tree (type, mul); }))))
496
497 /* Similar to above, but there could be an extra add/sub between
498 successive multiplications. */
499 (simplify
500 (mult (plus:s (mult:s@4 @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
501 (with {
502 bool overflowed = true;
503 wi::overflow_type ovf1, ovf2;
504 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@3),
505 TYPE_SIGN (type), &ovf1);
506 wide_int add = wi::mul (wi::to_wide (@2), wi::to_wide (@3),
507 TYPE_SIGN (type), &ovf2);
508 if (TYPE_OVERFLOW_UNDEFINED (type))
509 {
510 #if GIMPLE
511 value_range vr0;
512 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE
513 && get_global_range_query ()->range_of_expr (vr0, @4)
514 && vr0.kind () == VR_RANGE)
515 {
516 wide_int wmin0 = vr0.lower_bound ();
517 wide_int wmax0 = vr0.upper_bound ();
518 wmin0 = wi::mul (wmin0, wi::to_wide (@3), TYPE_SIGN (type), &ovf1);
519 wmax0 = wi::mul (wmax0, wi::to_wide (@3), TYPE_SIGN (type), &ovf2);
520 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
521 {
522 wi::add (wmin0, add, TYPE_SIGN (type), &ovf1);
523 wi::add (wmax0, add, TYPE_SIGN (type), &ovf2);
524 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
525 overflowed = false;
526 }
527 }
528 #endif
529 }
530 else
531 overflowed = false;
532 }
533 /* Skip folding on overflow. */
534 (if (!overflowed)
535 (plus (mult @0 { wide_int_to_tree (type, mul); })
536 { wide_int_to_tree (type, add); }))))
537
538 /* Similar to above, but a multiplication between successive additions. */
539 (simplify
540 (plus (mult:s (plus:s @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
541 (with {
542 bool overflowed = true;
543 wi::overflow_type ovf1;
544 wi::overflow_type ovf2;
545 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
546 TYPE_SIGN (type), &ovf1);
547 wide_int add = wi::add (mul, wi::to_wide (@3),
548 TYPE_SIGN (type), &ovf2);
549 if (TYPE_OVERFLOW_UNDEFINED (type))
550 {
551 #if GIMPLE
552 value_range vr0;
553 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE
554 && get_global_range_query ()->range_of_expr (vr0, @0)
555 && vr0.kind () == VR_RANGE)
556 {
557 wide_int wmin0 = vr0.lower_bound ();
558 wide_int wmax0 = vr0.upper_bound ();
559 wmin0 = wi::mul (wmin0, wi::to_wide (@2), TYPE_SIGN (type), &ovf1);
560 wmax0 = wi::mul (wmax0, wi::to_wide (@2), TYPE_SIGN (type), &ovf2);
561 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
562 {
563 wi::add (wmin0, mul, TYPE_SIGN (type), &ovf1);
564 wi::add (wmax0, mul, TYPE_SIGN (type), &ovf2);
565 if (ovf1 == wi::OVF_NONE && ovf2 == wi::OVF_NONE)
566 overflowed = false;
567 }
568 }
569 #endif
570 }
571 else
572 overflowed = false;
573 }
574 /* Skip folding on overflow. */
575 (if (!overflowed)
576 (plus (mult @0 @2) { wide_int_to_tree (type, add); }))))
577
578 /* Optimize A / A to 1.0 if we don't care about
579 NaNs or Infinities. */
580 (simplify
581 (rdiv @0 @0)
582 (if (FLOAT_TYPE_P (type)
583 && ! HONOR_NANS (type)
584 && ! HONOR_INFINITIES (type))
585 { build_one_cst (type); }))
586
587 /* Optimize -A / A to -1.0 if we don't care about
588 NaNs or Infinities. */
589 (simplify
590 (rdiv:C @0 (negate @0))
591 (if (FLOAT_TYPE_P (type)
592 && ! HONOR_NANS (type)
593 && ! HONOR_INFINITIES (type))
594 { build_minus_one_cst (type); }))
595
596 /* PR71078: x / abs(x) -> copysign (1.0, x) */
597 (simplify
598 (rdiv:C (convert? @0) (convert? (abs @0)))
599 (if (SCALAR_FLOAT_TYPE_P (type)
600 && ! HONOR_NANS (type)
601 && ! HONOR_INFINITIES (type))
602 (switch
603 (if (types_match (type, float_type_node))
604 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
605 (if (types_match (type, double_type_node))
606 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
607 (if (types_match (type, long_double_type_node))
608 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
609
610 /* In IEEE floating point, x/1 is not equivalent to x for snans. */
611 (simplify
612 (rdiv @0 real_onep)
613 (if (!tree_expr_maybe_signaling_nan_p (@0))
614 (non_lvalue @0)))
615
616 /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
617 (simplify
618 (rdiv @0 real_minus_onep)
619 (if (!tree_expr_maybe_signaling_nan_p (@0))
620 (negate @0)))
621
622 (if (flag_reciprocal_math)
623 /* Convert (A/B)/C to A/(B*C). */
624 (simplify
625 (rdiv (rdiv:s @0 @1) @2)
626 (rdiv @0 (mult @1 @2)))
627
628 /* Canonicalize x / (C1 * y) to (x * C2) / y. */
629 (simplify
630 (rdiv @0 (mult:s @1 REAL_CST@2))
631 (with
632 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
633 (if (tem)
634 (rdiv (mult @0 { tem; } ) @1))))
635
636 /* Convert A/(B/C) to (A/B)*C */
637 (simplify
638 (rdiv @0 (rdiv:s @1 @2))
639 (mult (rdiv @0 @1) @2)))
640
641 /* Simplify x / (- y) to -x / y. */
642 (simplify
643 (rdiv @0 (negate @1))
644 (rdiv (negate @0) @1))
645
646 (if (flag_unsafe_math_optimizations)
647 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/NaN.
648 Since C / x may underflow to zero, do this only for unsafe math. */
649 (for op (lt le gt ge)
650 neg_op (gt ge lt le)
651 (simplify
652 (op (rdiv REAL_CST@0 @1) real_zerop@2)
653 (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
654 (switch
655 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
656 (op @1 @2))
657 /* For C < 0, use the inverted operator. */
658 (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
659 (neg_op @1 @2)))))))
660
661 /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
662 (for div (trunc_div ceil_div floor_div round_div exact_div)
663 (simplify
664 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
665 (if (integer_pow2p (@2)
666 && tree_int_cst_sgn (@2) > 0
667 && tree_nop_conversion_p (type, TREE_TYPE (@0))
668 && wi::to_wide (@2) + wi::to_wide (@1) == 0)
669 (rshift (convert @0)
670 { build_int_cst (integer_type_node,
671 wi::exact_log2 (wi::to_wide (@2))); }))))
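/* Illustration: for unsigned x, (x & -8U) / 8 merely clears the low three
   bits before dividing, so it is equivalent to x >> 3.  */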
672
673 /* If ARG1 is a constant, we can convert this to a multiply by the
674 reciprocal. This does not have the same rounding properties,
675 so only do this if -freciprocal-math. We can actually
676 always safely do it if ARG1 is a power of two, but it's hard to
677 tell if it is or not in a portable manner. */
678 (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
679 (simplify
680 (rdiv @0 cst@1)
681 (if (optimize)
682 (if (flag_reciprocal_math
683 && !real_zerop (@1))
684 (with
685 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
686 (if (tem)
687 (mult @0 { tem; } )))
688 (if (cst != COMPLEX_CST)
689 (with { tree inverse = exact_inverse (type, @1); }
690 (if (inverse)
691 (mult @0 { inverse; } ))))))))
692
693 (for mod (ceil_mod floor_mod round_mod trunc_mod)
694 /* 0 % X is always zero. */
695 (simplify
696 (mod integer_zerop@0 @1)
697 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
698 (if (!integer_zerop (@1))
699 @0))
700 /* X % 1 is always zero. */
701 (simplify
702 (mod @0 integer_onep)
703 { build_zero_cst (type); })
704 /* X % -1 is zero. */
705 (simplify
706 (mod @0 integer_minus_onep@1)
707 (if (!TYPE_UNSIGNED (type))
708 { build_zero_cst (type); }))
709 /* X % X is zero. */
710 (simplify
711 (mod @0 @0)
712 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
713 (if (!integer_zerop (@0))
714 { build_zero_cst (type); }))
715 /* (X % Y) % Y is just X % Y. */
716 (simplify
717 (mod (mod@2 @0 @1) @1)
718 @2)
719 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
720 (simplify
721 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
722 (if (ANY_INTEGRAL_TYPE_P (type)
723 && TYPE_OVERFLOW_UNDEFINED (type)
724 && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
725 TYPE_SIGN (type)))
726 { build_zero_cst (type); }))
727 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
728 modulo and comparison, since it is simpler and equivalent. */
729 (for cmp (eq ne)
730 (simplify
731 (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
732 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
733 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
734 (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
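/* Worked example for the last rule (illustrative only): for signed x,
   (x % 4) == 0 is equivalent to ((unsigned) x % 4U) == 0U, which in turn
   folds to ((unsigned) x & 3U) == 0U, avoiding the sign fixups a signed
   modulo would require.  */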
735
736 /* X % -C is the same as X % C. */
737 (simplify
738 (trunc_mod @0 INTEGER_CST@1)
739 (if (TYPE_SIGN (type) == SIGNED
740 && !TREE_OVERFLOW (@1)
741 && wi::neg_p (wi::to_wide (@1))
742 && !TYPE_OVERFLOW_TRAPS (type)
743 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
744 && !sign_bit_p (@1, @1))
745 (trunc_mod @0 (negate @1))))
746
747 /* X % -Y is the same as X % Y. */
748 (simplify
749 (trunc_mod @0 (convert? (negate @1)))
750 (if (INTEGRAL_TYPE_P (type)
751 && !TYPE_UNSIGNED (type)
752 && !TYPE_OVERFLOW_TRAPS (type)
753 && tree_nop_conversion_p (type, TREE_TYPE (@1))
754 /* Avoid this transformation if X might be INT_MIN or
755 Y might be -1, because we would then change valid
756 INT_MIN % -(-1) into invalid INT_MIN % -1. */
757 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
758 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
759 (TREE_TYPE (@1))))))
760 (trunc_mod @0 (convert @1))))
761
762 /* X - (X / Y) * Y is the same as X % Y. */
763 (simplify
764 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
765 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
766 (convert (trunc_mod @0 @1))))
767
768 /* x * (1 + y / x) - y -> x - y % x */
769 (simplify
770 (minus (mult:cs @0 (plus:s (trunc_div:s @1 @0) integer_onep)) @1)
771 (if (INTEGRAL_TYPE_P (type))
772 (minus @0 (trunc_mod @1 @0))))
773
774 /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
775 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
776 Also optimize A % (C << N) where C is a power of 2,
777 to A & ((C << N) - 1).
778 Also optimize "A shift (B % C)", if C is a power of 2, to
779 "A shift (B & (C - 1))". SHIFT operation include "<<" and ">>"
780 and assume (B % C) is nonnegative as shifts negative values would
781 be UB. */
782 (match (power_of_two_cand @1)
783 INTEGER_CST@1)
784 (match (power_of_two_cand @1)
785 (lshift INTEGER_CST@1 @2))
786 (for mod (trunc_mod floor_mod)
787 (for shift (lshift rshift)
788 (simplify
789 (shift @0 (mod @1 (power_of_two_cand@2 @3)))
790 (if (integer_pow2p (@3) && tree_int_cst_sgn (@3) > 0)
791 (shift @0 (bit_and @1 (minus @2 { build_int_cst (TREE_TYPE (@2),
792 1); }))))))
793 (simplify
794 (mod @0 (convert? (power_of_two_cand@1 @2)))
795 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
796 /* Allow any integral conversions of the divisor, except
797 conversion from a narrower signed to a wider unsigned type,
798 where if @1 were a negative power of two, the divisor
799 would not be a power of two. */
800 && INTEGRAL_TYPE_P (type)
801 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
802 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
803 || TYPE_UNSIGNED (TREE_TYPE (@1))
804 || !TYPE_UNSIGNED (type))
805 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
806 (with { tree utype = TREE_TYPE (@1);
807 if (!TYPE_OVERFLOW_WRAPS (utype))
808 utype = unsigned_type_for (utype); }
809 (bit_and @0 (convert (minus (convert:utype @1)
810 { build_one_cst (utype); })))))))
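/* For instance (illustrative only), with unsigned a and b:

     a % 8         becomes  a & 7
     a % (4 << n)  becomes  a & ((4 << n) - 1)
     a << (b % 8)  becomes  a << (b & 7)

   provided the divisor is a (possibly shifted) power of two.  */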
811
812 /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
813 (simplify
814 (trunc_div (mult @0 integer_pow2p@1) @1)
815 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0)))
816 (bit_and @0 { wide_int_to_tree
817 (type, wi::mask (TYPE_PRECISION (type)
818 - wi::exact_log2 (wi::to_wide (@1)),
819 false, TYPE_PRECISION (type))); })))
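/* Sketch: for 32-bit unsigned t, (t * 8) / 8 keeps only the low 29 bits,
   so it becomes t & 0x1FFFFFFF; in general the mask is
   (1 << (prec - log2 (C))) - 1.  */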
820
821 /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
822 (simplify
823 (mult (trunc_div @0 integer_pow2p@1) @1)
824 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0)))
825 (bit_and @0 (negate @1))))
826
827 /* Simplify (t * 2) / 2 -> t. */
828 (for div (trunc_div ceil_div floor_div round_div exact_div)
829 (simplify
830 (div (mult:c @0 @1) @1)
831 (if (ANY_INTEGRAL_TYPE_P (type))
832 (if (TYPE_OVERFLOW_UNDEFINED (type))
833 @0
834 #if GIMPLE
835 (with
836 {
837 bool overflowed = true;
838 value_range vr0, vr1;
839 if (INTEGRAL_TYPE_P (type)
840 && get_global_range_query ()->range_of_expr (vr0, @0)
841 && get_global_range_query ()->range_of_expr (vr1, @1)
842 && vr0.kind () == VR_RANGE
843 && vr1.kind () == VR_RANGE)
844 {
845 wide_int wmin0 = vr0.lower_bound ();
846 wide_int wmax0 = vr0.upper_bound ();
847 wide_int wmin1 = vr1.lower_bound ();
848 wide_int wmax1 = vr1.upper_bound ();
849 /* If the multiplication can't overflow/wrap around, then
850 it can be optimized too. */
851 wi::overflow_type min_ovf, max_ovf;
852 wi::mul (wmin0, wmin1, TYPE_SIGN (type), &min_ovf);
853 wi::mul (wmax0, wmax1, TYPE_SIGN (type), &max_ovf);
854 if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
855 {
856 wi::mul (wmin0, wmax1, TYPE_SIGN (type), &min_ovf);
857 wi::mul (wmax0, wmin1, TYPE_SIGN (type), &max_ovf);
858 if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
859 overflowed = false;
860 }
861 }
862 }
863 (if (!overflowed)
864 @0))
865 #endif
866 ))))
867
868 (for op (negate abs)
869 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
870 (for coss (COS COSH)
871 (simplify
872 (coss (op @0))
873 (coss @0)))
874 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
875 (for pows (POW)
876 (simplify
877 (pows (op @0) REAL_CST@1)
878 (with { HOST_WIDE_INT n; }
879 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
880 (pows @0 @1)))))
881 /* Likewise for powi. */
882 (for pows (POWI)
883 (simplify
884 (pows (op @0) INTEGER_CST@1)
885 (if ((wi::to_wide (@1) & 1) == 0)
886 (pows @0 @1))))
887 /* Strip negate and abs from both operands of hypot. */
888 (for hypots (HYPOT)
889 (simplify
890 (hypots (op @0) @1)
891 (hypots @0 @1))
892 (simplify
893 (hypots @0 (op @1))
894 (hypots @0 @1)))
895 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
896 (for copysigns (COPYSIGN_ALL)
897 (simplify
898 (copysigns (op @0) @1)
899 (copysigns @0 @1))))
900
901 /* abs(x)*abs(x) -> x*x. Should be valid for all types. */
902 (simplify
903 (mult (abs@1 @0) @1)
904 (mult @0 @0))
905
906 /* Convert absu(x)*absu(x) -> x*x. */
907 (simplify
908 (mult (absu@1 @0) @1)
909 (mult (convert@2 @0) @2))
910
911 /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
912 (for coss (COS COSH)
913 copysigns (COPYSIGN)
914 (simplify
915 (coss (copysigns @0 @1))
916 (coss @0)))
917
918 /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
919 (for pows (POW)
920 copysigns (COPYSIGN)
921 (simplify
922 (pows (copysigns @0 @2) REAL_CST@1)
923 (with { HOST_WIDE_INT n; }
924 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
925 (pows @0 @1)))))
926 /* Likewise for powi. */
927 (for pows (POWI)
928 copysigns (COPYSIGN)
929 (simplify
930 (pows (copysigns @0 @2) INTEGER_CST@1)
931 (if ((wi::to_wide (@1) & 1) == 0)
932 (pows @0 @1))))
933
934 (for hypots (HYPOT)
935 copysigns (COPYSIGN)
936 /* hypot(copysign(x, y), z) -> hypot(x, z). */
937 (simplify
938 (hypots (copysigns @0 @1) @2)
939 (hypots @0 @2))
940 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
941 (simplify
942 (hypots @0 (copysigns @1 @2))
943 (hypots @0 @1)))
944
945 /* copysign(x, CST) -> [-]abs (x). */
946 (for copysigns (COPYSIGN_ALL)
947 (simplify
948 (copysigns @0 REAL_CST@1)
949 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
950 (negate (abs @0))
951 (abs @0))))
952
953 /* copysign(copysign(x, y), z) -> copysign(x, z). */
954 (for copysigns (COPYSIGN_ALL)
955 (simplify
956 (copysigns (copysigns @0 @1) @2)
957 (copysigns @0 @2)))
958
959 /* copysign(x,y)*copysign(x,y) -> x*x. */
960 (for copysigns (COPYSIGN_ALL)
961 (simplify
962 (mult (copysigns@2 @0 @1) @2)
963 (mult @0 @0)))
964
965 /* ccos(-x) -> ccos(x). Similarly for ccosh. */
966 (for ccoss (CCOS CCOSH)
967 (simplify
968 (ccoss (negate @0))
969 (ccoss @0)))
970
971 /* cabs(-x) and cabs(conj(x)) -> cabs(x). */
972 (for ops (conj negate)
973 (for cabss (CABS)
974 (simplify
975 (cabss (ops @0))
976 (cabss @0))))
977
978 /* Fold (a * (1 << b)) into (a << b) */
979 (simplify
980 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
981 (if (! FLOAT_TYPE_P (type)
982 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
983 (lshift @0 @2)))
984
985 /* Shifts by constants distribute over several binary operations,
986 hence (X << C) + (Y << C) can be simplified to (X + Y) << C. */
987 (for op (plus minus)
988 (simplify
989 (op (lshift:s @0 @1) (lshift:s @2 @1))
990 (if (INTEGRAL_TYPE_P (type)
991 && TYPE_OVERFLOW_WRAPS (type)
992 && !TYPE_SATURATING (type))
993 (lshift (op @0 @2) @1))))
994
995 (for op (bit_and bit_ior bit_xor)
996 (simplify
997 (op (lshift:s @0 @1) (lshift:s @2 @1))
998 (if (INTEGRAL_TYPE_P (type))
999 (lshift (op @0 @2) @1)))
1000 (simplify
1001 (op (rshift:s @0 @1) (rshift:s @2 @1))
1002 (if (INTEGRAL_TYPE_P (type))
1003 (rshift (op @0 @2) @1))))
1004
1005 /* Fold (1 << (C - x)) where C = precision(type) - 1
1006 into ((1 << C) >> x). */
1007 (simplify
1008 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
1009 (if (INTEGRAL_TYPE_P (type)
1010 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
1011 && single_use (@1))
1012 (if (TYPE_UNSIGNED (type))
1013 (rshift (lshift @0 @2) @3)
1014 (with
1015 { tree utype = unsigned_type_for (type); }
1016 (convert (rshift (lshift (convert:utype @0) @2) @3))))))
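/* E.g. for 32-bit unsigned operands (illustrative): 1U << (31 - x) is
   rewritten as (1U << 31) >> x, i.e. 0x80000000U >> x, removing the
   subtraction from the shift count.  */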
1017
1018 /* Fold ((type)(a<0)) << SIGNBITOFA into ((type)a) & signbit. */
1019 (simplify
1020 (lshift (convert (lt @0 integer_zerop@1)) INTEGER_CST@2)
1021 (if (TYPE_SIGN (TREE_TYPE (@0)) == SIGNED
1022 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (TREE_TYPE (@0)) - 1))
1023 (with { wide_int wone = wi::one (TYPE_PRECISION (type)); }
1024 (bit_and (convert @0)
1025 { wide_int_to_tree (type,
1026 wi::lshift (wone, wi::to_wide (@2))); }))))
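/* Concretely (sketch, 32-bit int a): ((unsigned) (a < 0)) << 31 yields
   0x80000000 exactly when a is negative, which is the same as
   (unsigned) a & 0x80000000, i.e. just the sign bit of a.  */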
1027
1028 /* Fold (-x >> C) into -(x > 0) where C = precision(type) - 1. */
1029 (for cst (INTEGER_CST VECTOR_CST)
1030 (simplify
1031 (rshift (negate:s @0) cst@1)
1032 (if (!TYPE_UNSIGNED (type)
1033 && TYPE_OVERFLOW_UNDEFINED (type))
1034 (with { tree stype = TREE_TYPE (@1);
1035 tree bt = truth_type_for (type);
1036 tree zeros = build_zero_cst (type);
1037 tree cst = NULL_TREE; }
1038 (switch
1039 /* Handle scalar case. */
1040 (if (INTEGRAL_TYPE_P (type)
1041 /* If we apply the rule to the scalar type before vectorization,
1042 we force the result of the comparison to be a bool, which
1043 requires an extra AND on the result that is indistinguishable
1044 from the case where the user actually wanted 0 or 1 as the
1045 result, so it can't be removed. */
1046 && canonicalize_math_after_vectorization_p ()
1047 && wi::eq_p (wi::to_wide (@1), TYPE_PRECISION (type) - 1))
1048 (negate (convert (gt @0 { zeros; }))))
1049 /* Handle vector case. */
1050 (if (VECTOR_INTEGER_TYPE_P (type)
1051 /* First check whether the target has the same mode for vector
1052 comparison results as its operands do. */
1053 && TYPE_MODE (bt) == TYPE_MODE (type)
1054 /* Then check to see if the target is able to expand the comparison
1055 with the given type later on, otherwise we may ICE. */
1056 && expand_vec_cmp_expr_p (type, bt, GT_EXPR)
1057 && (cst = uniform_integer_cst_p (@1)) != NULL
1058 && wi::eq_p (wi::to_wide (cst), element_precision (type) - 1))
1059 (view_convert (gt:bt @0 { zeros; }))))))))
1060
1061 /* Fold (C1/X)*C2 into (C1*C2)/X. */
1062 (simplify
1063 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
1064 (if (flag_associative_math
1065 && single_use (@3))
1066 (with
1067 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
1068 (if (tem)
1069 (rdiv { tem; } @1)))))
1070
1071 /* Simplify ~X & X as zero. */
1072 (simplify
1073 (bit_and:c (convert? @0) (convert? (bit_not @0)))
1074 { build_zero_cst (type); })
1075
1076 /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
1077 (simplify
1078 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
1079 (if (TYPE_UNSIGNED (type))
1080 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
1081
1082 (for bitop (bit_and bit_ior)
1083 cmp (eq ne)
1084 /* PR35691: Transform
1085 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
1086 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
1087 (simplify
1088 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
1089 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1090 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
1091 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
1092 (cmp (bit_ior @0 (convert @1)) @2)))
1093 /* Transform:
1094 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
1095 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
1096 (simplify
1097 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
1098 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1099 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
1100 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
1101 (cmp (bit_and @0 (convert @1)) @2))))
1102
1103 /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
1104 (simplify
1105 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
1106 (minus (bit_xor @0 @1) @1))
1107 (simplify
1108 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
1109 (if (~wi::to_wide (@2) == wi::to_wide (@1))
1110 (minus (bit_xor @0 @1) @1)))
1111
1112 /* Fold (A & B) - (A & ~B) into B - (A ^ B). */
1113 (simplify
1114 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
1115 (minus @1 (bit_xor @0 @1)))
1116
1117 /* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
1118 (for op (bit_ior bit_xor plus)
1119 (simplify
1120 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
1121 (bit_xor @0 @1))
1122 (simplify
1123 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
1124 (if (~wi::to_wide (@2) == wi::to_wide (@1))
1125 (bit_xor @0 @1))))
1126
1127 /* PR53979: Transform ((a ^ b) | a) -> (a | b) */
1128 (simplify
1129 (bit_ior:c (bit_xor:c @0 @1) @0)
1130 (bit_ior @0 @1))
1131
1132 /* (a & ~b) | (a ^ b) --> a ^ b */
1133 (simplify
1134 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
1135 @2)
1136
1137 /* (a & ~b) ^ ~a --> ~(a & b) */
1138 (simplify
1139 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
1140 (bit_not (bit_and @0 @1)))
1141
1142 /* (~a & b) ^ a --> (a | b) */
1143 (simplify
1144 (bit_xor:c (bit_and:cs (bit_not @0) @1) @0)
1145 (bit_ior @0 @1))
1146
1147 /* (a | b) & ~(a ^ b) --> a & b */
1148 (simplify
1149 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
1150 (bit_and @0 @1))
1151
1152 /* a | ~(a ^ b) --> a | ~b */
1153 (simplify
1154 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
1155 (bit_ior @0 (bit_not @1)))
1156
1157 /* (a | b) | (a &^ b) --> a | b */
1158 (for op (bit_and bit_xor)
1159 (simplify
1160 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
1161 @2))
1162
1163 /* (a & b) | ~(a ^ b) --> ~(a ^ b) */
1164 (simplify
1165 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
1166 @2)
1167
1168 /* ~(~a & b) --> a | ~b */
1169 (simplify
1170 (bit_not (bit_and:cs (bit_not @0) @1))
1171 (bit_ior @0 (bit_not @1)))
1172
1173 /* ~(~a | b) --> a & ~b */
1174 (simplify
1175 (bit_not (bit_ior:cs (bit_not @0) @1))
1176 (bit_and @0 (bit_not @1)))
1177
1178 /* (a ^ b) & ((b ^ c) ^ a) --> (a ^ b) & ~c */
1179 (simplify
1180 (bit_and:c (bit_xor:c@3 @0 @1) (bit_xor:cs (bit_xor:cs @1 @2) @0))
1181 (bit_and @3 (bit_not @2)))
1182
1183 /* (a ^ b) | ((b ^ c) ^ a) --> (a ^ b) | c */
1184 (simplify
1185 (bit_ior:c (bit_xor:c@3 @0 @1) (bit_xor:c (bit_xor:c @1 @2) @0))
1186 (bit_ior @3 @2))
1187
1188 /* (~X | C) ^ D -> (X | C) ^ (~D ^ C) if (~D ^ C) can be simplified. */
1189 (simplify
1190 (bit_xor:c (bit_ior:cs (bit_not:s @0) @1) @2)
1191 (bit_xor (bit_ior @0 @1) (bit_xor! (bit_not! @2) @1)))
1192
1193 /* (~X & C) ^ D -> (X & C) ^ (D ^ C) if (D ^ C) can be simplified. */
1194 (simplify
1195 (bit_xor:c (bit_and:cs (bit_not:s @0) @1) @2)
1196 (bit_xor (bit_and @0 @1) (bit_xor! @2 @1)))
1197
1198 /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
1199 (simplify
1200 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
1201 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1202 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
1203 (bit_xor @0 @1)))
1204
1205 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1206 ((A & N) + B) & M -> (A + B) & M
1207 Similarly if (N & M) == 0,
1208 ((A | N) + B) & M -> (A + B) & M
1209 and for - instead of + (or unary - instead of +)
1210 and/or ^ instead of |.
1211 If B is constant and (B & M) == 0, fold into A & M. */
1212 (for op (plus minus)
1213 (for bitop (bit_and bit_ior bit_xor)
1214 (simplify
1215 (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
1216 (with
1217 { tree pmop[2];
1218 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
1219 @3, @4, @1, ERROR_MARK, NULL_TREE,
1220 NULL_TREE, pmop); }
1221 (if (utype)
1222 (convert (bit_and (op (convert:utype { pmop[0]; })
1223 (convert:utype { pmop[1]; }))
1224 (convert:utype @2))))))
1225 (simplify
1226 (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
1227 (with
1228 { tree pmop[2];
1229 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
1230 NULL_TREE, NULL_TREE, @1, bitop, @3,
1231 @4, pmop); }
1232 (if (utype)
1233 (convert (bit_and (op (convert:utype { pmop[0]; })
1234 (convert:utype { pmop[1]; }))
1235 (convert:utype @2)))))))
1236 (simplify
1237 (bit_and (op:s @0 @1) INTEGER_CST@2)
1238 (with
1239 { tree pmop[2];
1240 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
1241 NULL_TREE, NULL_TREE, @1, ERROR_MARK,
1242 NULL_TREE, NULL_TREE, pmop); }
1243 (if (utype)
1244 (convert (bit_and (op (convert:utype { pmop[0]; })
1245 (convert:utype { pmop[1]; }))
1246 (convert:utype @2)))))))
1247 (for bitop (bit_and bit_ior bit_xor)
1248 (simplify
1249 (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
1250 (with
1251 { tree pmop[2];
1252 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
1253 bitop, @2, @3, NULL_TREE, ERROR_MARK,
1254 NULL_TREE, NULL_TREE, pmop); }
1255 (if (utype)
1256 (convert (bit_and (negate (convert:utype { pmop[0]; }))
1257 (convert:utype @1)))))))
1258
1259 /* X % Y is smaller than Y. */
1260 (for cmp (lt ge)
1261 (simplify
1262 (cmp (trunc_mod @0 @1) @1)
1263 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
1264 { constant_boolean_node (cmp == LT_EXPR, type); })))
1265 (for cmp (gt le)
1266 (simplify
1267 (cmp @1 (trunc_mod @0 @1))
1268 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
1269 { constant_boolean_node (cmp == GT_EXPR, type); })))
1270
1271 /* x | ~0 -> ~0 */
1272 (simplify
1273 (bit_ior @0 integer_all_onesp@1)
1274 @1)
1275
1276 /* x | 0 -> x */
1277 (simplify
1278 (bit_ior @0 integer_zerop)
1279 @0)
1280
1281 /* x & 0 -> 0 */
1282 (simplify
1283 (bit_and @0 integer_zerop@1)
1284 @1)
1285
1286 /* ~x | x -> -1 */
1287 /* ~x ^ x -> -1 */
1288 /* ~x + x -> -1 */
1289 (for op (bit_ior bit_xor plus)
1290 (simplify
1291 (op:c (convert? @0) (convert? (bit_not @0)))
1292 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
1293
1294 /* x ^ x -> 0 */
1295 (simplify
1296 (bit_xor @0 @0)
1297 { build_zero_cst (type); })
1298
1299 /* Canonicalize X ^ ~0 to ~X. */
1300 (simplify
1301 (bit_xor @0 integer_all_onesp@1)
1302 (bit_not @0))
1303
1304 /* x & ~0 -> x */
1305 (simplify
1306 (bit_and @0 integer_all_onesp)
1307 (non_lvalue @0))
1308
1309 /* x & x -> x, x | x -> x */
1310 (for bitop (bit_and bit_ior)
1311 (simplify
1312 (bitop @0 @0)
1313 (non_lvalue @0)))
1314
1315 /* x & C -> x if we know that x & ~C == 0. */
1316 #if GIMPLE
1317 (simplify
1318 (bit_and SSA_NAME@0 INTEGER_CST@1)
1319 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1320 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
1321 @0))
1322 #endif
1323
1324 /* ~(~X - Y) -> X + Y and ~(~X + Y) -> X - Y. */
1325 (simplify
1326 (bit_not (minus (bit_not @0) @1))
1327 (plus @0 @1))
1328 (simplify
1329 (bit_not (plus:c (bit_not @0) @1))
1330 (minus @0 @1))
1331 /* (~X - ~Y) -> Y - X. */
1332 (simplify
1333 (minus (bit_not @0) (bit_not @1))
1334 (if (!TYPE_OVERFLOW_SANITIZED (type))
1335 (with { tree utype = unsigned_type_for (type); }
1336 (convert (minus (convert:utype @1) (convert:utype @0))))))
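/* The identity follows from ~a == -a - 1 in two's complement:
   ~x - ~y == (-x - 1) - (-y - 1) == y - x, computed here in the unsigned
   type to sidestep signed-overflow concerns.  */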
1337
1338 /* ~(X - Y) -> ~X + Y. */
1339 (simplify
1340 (bit_not (minus:s @0 @1))
1341 (plus (bit_not @0) @1))
1342 (simplify
1343 (bit_not (plus:s @0 INTEGER_CST@1))
1344 (if ((INTEGRAL_TYPE_P (type)
1345 && TYPE_UNSIGNED (type))
1346 || (!TYPE_OVERFLOW_SANITIZED (type)
1347 && may_negate_without_overflow_p (@1)))
1348 (plus (bit_not @0) { const_unop (NEGATE_EXPR, type, @1); })))
1349
1350 #if GIMPLE
1351 /* ~X + Y -> (Y - X) - 1. */
1352 (simplify
1353 (plus:c (bit_not @0) @1)
1354 (if (ANY_INTEGRAL_TYPE_P (type)
1355 && TYPE_OVERFLOW_WRAPS (type)
1356 /* -1 - X is folded to ~X, so we'd recurse endlessly. */
1357 && !integer_all_onesp (@1))
1358 (plus (minus @1 @0) { build_minus_one_cst (type); })
1359 (if (INTEGRAL_TYPE_P (type)
1360 && TREE_CODE (@1) == INTEGER_CST
1361 && wi::to_wide (@1) != wi::min_value (TYPE_PRECISION (type),
1362 SIGNED))
1363 (minus (plus @1 { build_minus_one_cst (type); }) @0))))
1364 #endif
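/* Again via ~x == -x - 1: ~x + y == (y - x) - 1, or equivalently
   (y - 1) - x when y is a constant other than the type's minimum value
   (so that y - 1 cannot overflow).  */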
1365
1366 /* ~(X >> Y) -> ~X >> Y if ~X can be simplified. */
1367 (simplify
1368 (bit_not (rshift:s @0 @1))
1369 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
1370 (rshift (bit_not! @0) @1)
1371 /* For logical right shifts, this is possible only if @0 doesn't
1372 have the MSB set and the logical right shift is changed into
1373 an arithmetic shift. */
1374 (if (!wi::neg_p (tree_nonzero_bits (@0)))
1375 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1376 (convert (rshift (bit_not! (convert:stype @0)) @1))))))
1377
1378 /* x + (x & 1) -> (x + 1) & ~1 */
1379 (simplify
1380 (plus:c @0 (bit_and:s @0 integer_onep@1))
1381 (bit_and (plus @0 @1) (bit_not @1)))
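/* In other words, x + (x & 1) rounds x up to the nearest even value
   (leaving even x unchanged), which is exactly what (x + 1) & ~1
   computes as well.  */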
1382
1383 /* x & ~(x & y) -> x & ~y */
1384 /* x | ~(x | y) -> x | ~y */
1385 (for bitop (bit_and bit_ior)
1386 (simplify
1387 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
1388 (bitop @0 (bit_not @1))))
1389
1390 /* (~x & y) | ~(x | y) -> ~x */
1391 (simplify
1392 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
1393 @2)
1394
1395 /* (x | y) ^ (x | ~y) -> ~x */
1396 (simplify
1397 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
1398 (bit_not @0))
1399
1400 /* (x & y) | ~(x | y) -> ~(x ^ y) */
1401 (simplify
1402 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1403 (bit_not (bit_xor @0 @1)))
1404
1405 /* (~x | y) ^ (x ^ y) -> x | ~y */
1406 (simplify
1407 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
1408 (bit_ior @0 (bit_not @1)))
1409
1410 /* (x ^ y) | ~(x | y) -> ~(x & y) */
1411 (simplify
1412 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1413 (bit_not (bit_and @0 @1)))
1414
1415 /* (x | y) & ~x -> y & ~x */
1416 /* (x & y) | ~x -> y | ~x */
1417 (for bitop (bit_and bit_ior)
1418 rbitop (bit_ior bit_and)
1419 (simplify
1420 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
1421 (bitop @1 @2)))
1422
1423 /* (x & y) ^ (x | y) -> x ^ y */
1424 (simplify
1425 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
1426 (bit_xor @0 @1))
1427
1428 /* (x ^ y) ^ (x | y) -> x & y */
1429 (simplify
1430 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
1431 (bit_and @0 @1))
1432
1433 /* (x & y) + (x ^ y) -> x | y */
1434 /* (x & y) | (x ^ y) -> x | y */
1435 /* (x & y) ^ (x ^ y) -> x | y */
1436 (for op (plus bit_ior bit_xor)
1437 (simplify
1438 (op:c (bit_and @0 @1) (bit_xor @0 @1))
1439 (bit_ior @0 @1)))
1440
1441 /* (x & y) + (x | y) -> x + y */
1442 (simplify
1443 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
1444 (plus @0 @1))
1445
1446 /* (x + y) - (x | y) -> x & y */
1447 (simplify
1448 (minus (plus @0 @1) (bit_ior @0 @1))
1449 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1450 && !TYPE_SATURATING (type))
1451 (bit_and @0 @1)))
1452
1453 /* (x + y) - (x & y) -> x | y */
1454 (simplify
1455 (minus (plus @0 @1) (bit_and @0 @1))
1456 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1457 && !TYPE_SATURATING (type))
1458 (bit_ior @0 @1)))
1459
1460 /* (x | y) - y -> (x & ~y) */
1461 (simplify
1462 (minus (bit_ior:cs @0 @1) @1)
1463 (bit_and @0 (bit_not @1)))
1464
1465 /* (x | y) - (x ^ y) -> x & y */
1466 (simplify
1467 (minus (bit_ior @0 @1) (bit_xor @0 @1))
1468 (bit_and @0 @1))
1469
1470 /* (x | y) - (x & y) -> x ^ y */
1471 (simplify
1472 (minus (bit_ior @0 @1) (bit_and @0 @1))
1473 (bit_xor @0 @1))
1474
1475 /* (x | y) & ~(x & y) -> x ^ y */
1476 (simplify
1477 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
1478 (bit_xor @0 @1))
1479
1480 /* (x | y) & (~x ^ y) -> x & y */
1481 (simplify
1482 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
1483 (bit_and @0 @1))
1484
1485 /* (~x | y) & (x | ~y) -> ~(x ^ y) */
1486 (simplify
1487 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
1488 (bit_not (bit_xor @0 @1)))
1489
1490 /* (~x | y) ^ (x | ~y) -> x ^ y */
1491 (simplify
1492 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
1493 (bit_xor @0 @1))
1494
1495 /* ((x & y) - (x | y)) - 1 -> ~(x ^ y) */
1496 (simplify
1497 (plus (nop_convert1? (minus@2 (nop_convert2? (bit_and:c @0 @1))
1498 (nop_convert2? (bit_ior @0 @1))))
1499 integer_all_onesp)
1500 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1501 && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
1502 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
1503 && !TYPE_SATURATING (TREE_TYPE (@2)))
1504 (bit_not (convert (bit_xor @0 @1)))))
1505 (simplify
1506 (minus (nop_convert1? (plus@2 (nop_convert2? (bit_and:c @0 @1))
1507 integer_all_onesp))
1508 (nop_convert3? (bit_ior @0 @1)))
1509 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1510 && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
1511 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
1512 && !TYPE_SATURATING (TREE_TYPE (@2)))
1513 (bit_not (convert (bit_xor @0 @1)))))
1514 (simplify
1515 (minus (nop_convert1? (bit_and @0 @1))
1516 (nop_convert2? (plus@2 (nop_convert3? (bit_ior:c @0 @1))
1517 integer_onep)))
1518 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1519 && !TYPE_SATURATING (type) && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))
1520 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@2))
1521 && !TYPE_SATURATING (TREE_TYPE (@2)))
1522 (bit_not (convert (bit_xor @0 @1)))))
1523
1524 /* ~x & ~y -> ~(x | y)
1525 ~x | ~y -> ~(x & y) */
1526 (for op (bit_and bit_ior)
1527 rop (bit_ior bit_and)
1528 (simplify
1529 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1530 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1531 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1532 (bit_not (rop (convert @0) (convert @1))))))
1533
1534 /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
1535 with a constant, and the two constants have no bits in common,
1536 we should treat this as a BIT_IOR_EXPR since this may produce more
1537 simplifications. */
1538 (for op (bit_xor plus)
1539 (simplify
1540 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
1541 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
1542 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1543 && tree_nop_conversion_p (type, TREE_TYPE (@2))
1544 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
1545 (bit_ior (convert @4) (convert @5)))))
1546
1547 /* (X | Y) ^ X -> Y & ~X */
1548 (simplify
1549 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
1550 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1551 (convert (bit_and @1 (bit_not @0)))))
1552
1553 /* Convert ~X ^ ~Y to X ^ Y. */
1554 (simplify
1555 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1556 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1557 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1558 (bit_xor (convert @0) (convert @1))))
1559
1560 /* Convert ~X ^ C to X ^ ~C. */
1561 (simplify
1562 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
1563 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1564 (bit_xor (convert @0) (bit_not @1))))
1565
1566 /* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
1567 (for opo (bit_and bit_xor)
1568 opi (bit_xor bit_and)
1569 (simplify
1570 (opo:c (opi:cs @0 @1) @1)
1571 (bit_and (bit_not @0) @1)))
1572
1573 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
1574 operands are another bit-wise operation with a common input. If so,
1575 distribute the bit operations to save an operation and possibly two if
1576 constants are involved. For example, convert
1577 (A | B) & (A | C) into A | (B & C)
1578 Further simplification will occur if B and C are constants. */
1579 (for op (bit_and bit_ior bit_xor)
1580 rop (bit_ior bit_and bit_and)
1581 (simplify
1582 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
1583 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1584 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1585 (rop (convert @0) (op (convert @1) (convert @2))))))
1586
1587 /* Some simple reassociation for bit operations, also handled in reassoc. */
1588 /* (X & Y) & Y -> X & Y
1589 (X | Y) | Y -> X | Y */
1590 (for op (bit_and bit_ior)
1591 (simplify
1592 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
1593 @2))
1594 /* (X ^ Y) ^ Y -> X */
1595 (simplify
1596 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
1597 (convert @0))
1598 /* (X & Y) & (X & Z) -> (X & Y) & Z
1599 (X | Y) | (X | Z) -> (X | Y) | Z */
1600 (for op (bit_and bit_ior)
1601 (simplify
1602 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
1603 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1604 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1605 (if (single_use (@5) && single_use (@6))
1606 (op @3 (convert @2))
1607 (if (single_use (@3) && single_use (@4))
1608 (op (convert @1) @5))))))
1609 /* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
1610 (simplify
1611 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
1612 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1613 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1614 (bit_xor (convert @1) (convert @2))))
1615
1616 /* Convert abs (abs (X)) into abs (X).
1617 Also convert absu (absu (X)) into absu (X). */
1618 (simplify
1619 (abs (abs@1 @0))
1620 @1)
1621
1622 (simplify
1623 (absu (convert@2 (absu@1 @0)))
1624 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
1625 @1))
1626
1627 /* Convert abs[u] (-X) -> abs[u] (X). */
1628 (simplify
1629 (abs (negate @0))
1630 (abs @0))
1631
1632 (simplify
1633 (absu (negate @0))
1634 (absu @0))
1635
1636 /* Convert abs[u] (X) where X is nonnegative -> (X). */
1637 (simplify
1638 (abs tree_expr_nonnegative_p@0)
1639 @0)
1640
1641 (simplify
1642 (absu tree_expr_nonnegative_p@0)
1643 (convert @0))
1644
1645 /* Simplify (-(X < 0) | 1) * X into abs (X) or absu (X). */
1646 (simplify
1647 (mult:c (nop_convert1?
1648 (bit_ior (nop_convert2? (negate (convert? (lt @0 integer_zerop))))
1649 integer_onep))
1650 (nop_convert3? @0))
1651 (if (INTEGRAL_TYPE_P (type)
1652 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1653 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
1654 (if (TYPE_UNSIGNED (type))
1655 (absu @0)
1656 (abs @0)
1657 )
1658 )
1659 )
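/* Rationale, assuming two's complement: for X < 0 the multiplier is
   (-(1) | 1) = -1 and the product is -X; for X >= 0 it is (0 | 1) = 1 and the
   product is X.  Either way the result is abs (X).  */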
1660
1661 /* A few cases of fold-const.cc negate_expr_p predicate. */
1662 (match negate_expr_p
1663 INTEGER_CST
1664 (if ((INTEGRAL_TYPE_P (type)
1665 && TYPE_UNSIGNED (type))
1666 || (!TYPE_OVERFLOW_SANITIZED (type)
1667 && may_negate_without_overflow_p (t)))))
1668 (match negate_expr_p
1669 FIXED_CST)
1670 (match negate_expr_p
1671 (negate @0)
1672 (if (!TYPE_OVERFLOW_SANITIZED (type))))
1673 (match negate_expr_p
1674 REAL_CST
1675 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
1676 /* VECTOR_CST handling of non-wrapping types would recurse in unsupported
1677 ways. */
1678 (match negate_expr_p
1679 VECTOR_CST
1680 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
1681 (match negate_expr_p
1682 (minus @0 @1)
1683 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
1684 || (FLOAT_TYPE_P (type)
1685 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1686 && !HONOR_SIGNED_ZEROS (type)))))
1687
1688 /* (-A) * (-B) -> A * B */
1689 (simplify
1690 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
1691 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1692 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1693 (mult (convert @0) (convert (negate @1)))))
1694
1695 /* -(A + B) -> (-B) - A. */
1696 (simplify
1697 (negate (plus:c @0 negate_expr_p@1))
1698 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (type)
1699 && !HONOR_SIGNED_ZEROS (type))
1700 (minus (negate @1) @0)))
1701
1702 /* -(A - B) -> B - A. */
1703 (simplify
1704 (negate (minus @0 @1))
1705 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
1706 || (FLOAT_TYPE_P (type)
1707 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1708 && !HONOR_SIGNED_ZEROS (type)))
1709 (minus @1 @0)))
1710 (simplify
1711 (negate (pointer_diff @0 @1))
1712 (if (TYPE_OVERFLOW_UNDEFINED (type))
1713 (pointer_diff @1 @0)))
1714
1715 /* A - B -> A + (-B) if B is easily negatable. */
1716 (simplify
1717 (minus @0 negate_expr_p@1)
1718 (if (!FIXED_POINT_TYPE_P (type))
1719 (plus @0 (negate @1))))
1720
1721 /* Other simplifications of negation (c.f. fold_negate_expr_1). */
1722 (simplify
1723 (negate (mult:c@0 @1 negate_expr_p@2))
1724 (if (! TYPE_UNSIGNED (type)
1725 && ! HONOR_SIGN_DEPENDENT_ROUNDING (type)
1726 && single_use (@0))
1727 (mult @1 (negate @2))))
1728
1729 (simplify
1730 (negate (rdiv@0 @1 negate_expr_p@2))
1731 (if (! HONOR_SIGN_DEPENDENT_ROUNDING (type)
1732 && single_use (@0))
1733 (rdiv @1 (negate @2))))
1734
1735 (simplify
1736 (negate (rdiv@0 negate_expr_p@1 @2))
1737 (if (! HONOR_SIGN_DEPENDENT_ROUNDING (type)
1738 && single_use (@0))
1739 (rdiv (negate @1) @2)))
1740
1741 /* Fold -((int)x >> (prec - 1)) into (unsigned)x >> (prec - 1). */
1742 (simplify
1743 (negate (convert? (rshift @0 INTEGER_CST@1)))
1744 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1745 && wi::to_wide (@1) == element_precision (type) - 1)
1746 (with { tree stype = TREE_TYPE (@0);
1747 tree ntype = TYPE_UNSIGNED (stype) ? signed_type_for (stype)
1748 : unsigned_type_for (stype); }
1749 (if (VECTOR_TYPE_P (type))
1750 (view_convert (rshift (view_convert:ntype @0) @1))
1751 (convert (rshift (convert:ntype @0) @1))))))
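/* For example, with a 32-bit int x the arithmetic shift x >> 31 yields 0 or
   -1 depending on the sign bit, so -(x >> 31) is 0 or 1; the logical shift
   (unsigned) x >> 31 produces exactly the same 0 or 1 without the negate.  */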
1752
1753 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
1754 when profitable.
1755 For bitwise binary operations apply operand conversions to the
1756 binary operation result instead of to the operands. This allows
1757 successive conversions and bitwise binary operations to be combined.
1758 We combine the above two cases by using a conditional convert. */
1759 (for bitop (bit_and bit_ior bit_xor)
1760 (simplify
1761 (bitop (convert@2 @0) (convert?@3 @1))
1762 (if (((TREE_CODE (@1) == INTEGER_CST
1763 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1764 && (int_fits_type_p (@1, TREE_TYPE (@0))
1765 || tree_nop_conversion_p (TREE_TYPE (@0), type)))
1766 || types_match (@0, @1))
1767 && !POINTER_TYPE_P (TREE_TYPE (@0))
1768 && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE
1769 /* ??? This transform conflicts with fold-const.cc doing
1770 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
1771 constant (if x has signed type, the sign bit cannot be set
1772 in c). This folds extension into the BIT_AND_EXPR.
1773 Restrict it to GIMPLE to avoid endless recursions. */
1774 && (bitop != BIT_AND_EXPR || GIMPLE)
1775 && (/* That's a good idea if the conversion widens the operand, thus
1776 after hoisting the conversion the operation will be narrower.
1777 It is also a good idea if the conversion is a nop, as it moves the
1778 conversion to one side, allowing the conversions to be combined. */
1779 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
1780 /* The conversion check for being a nop can only be done at the gimple
1781 level as fold_binary has some re-association code which can conflict
1782 with this if there is a "constant" which is not a full INTEGER_CST. */
1783 || (GIMPLE && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
1784 /* It's also a good idea if the conversion is to a non-integer
1785 mode. */
1786 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
1787 /* Or if the precision of TO is not the same as the precision
1788 of its mode. */
1789 || !type_has_mode_precision_p (type)
1790 /* In GIMPLE, getting rid of 2 conversions in exchange for one new one
1791 results in smaller IL. */
1792 || (GIMPLE
1793 && TREE_CODE (@1) != INTEGER_CST
1794 && tree_nop_conversion_p (type, TREE_TYPE (@0))
1795 && single_use (@2)
1796 && single_use (@3))))
1797 (convert (bitop @0 (convert @1)))))
1798 /* In GIMPLE, getting rid of 2 conversions in exchange for one new one
1799 results in smaller IL. */
1800 (simplify
1801 (convert (bitop:cs@2 (nop_convert:s @0) @1))
1802 (if (GIMPLE
1803 && TREE_CODE (@1) != INTEGER_CST
1804 && tree_nop_conversion_p (type, TREE_TYPE (@2))
1805 && types_match (type, @0)
1806 && !POINTER_TYPE_P (TREE_TYPE (@0))
1807 && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE)
1808 (bitop @0 (convert @1)))))
1809
1810 (for bitop (bit_and bit_ior)
1811 rbitop (bit_ior bit_and)
1812 /* (x | y) & x -> x */
1813 /* (x & y) | x -> x */
1814 (simplify
1815 (bitop:c (rbitop:c @0 @1) @0)
1816 @0)
1817 /* (~x | y) & x -> x & y */
1818 /* (~x & y) | x -> x | y */
1819 (simplify
1820 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1821 (bitop @0 @1)))
1822
1823 /* ((x | y) & z) | x -> (z & y) | x */
1824 (simplify
1825 (bit_ior:c (bit_and:cs (bit_ior:cs @0 @1) @2) @0)
1826 (bit_ior (bit_and @2 @1) @0))
1827
1828 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1829 (simplify
1830 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1831 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
1832
1833 /* Combine successive equal operations with constants. */
1834 (for bitop (bit_and bit_ior bit_xor)
1835 (simplify
1836 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1837 (if (!CONSTANT_CLASS_P (@0))
1838 /* This is the canonical form regardless of whether (bitop @1 @2) can be
1839 folded to a constant. */
1840 (bitop @0 (bitop @1 @2))
1841 /* In this case we have three constants and (bitop @0 @1) doesn't fold
1842 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
1843 the values involved are such that the operation can't be decided at
1844 compile time. Try folding one of @0 or @1 with @2 to see whether
1845 that combination can be decided at compile time.
1846
1847 Keep the existing form if both folds fail, to avoid endless
1848 oscillation. */
1849 (with { tree cst1 = const_binop (bitop, type, @0, @2); }
1850 (if (cst1)
1851 (bitop @1 { cst1; })
1852 (with { tree cst2 = const_binop (bitop, type, @1, @2); }
1853 (if (cst2)
1854 (bitop @0 { cst2; }))))))))
1855
1856 /* Try simple folding for X op !X, and X op X with the help
1857 of the truth_valued_p and logical_inverted_value predicates. */
1858 (match truth_valued_p
1859 @0
1860 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
1861 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
1862 (match truth_valued_p
1863 (op @0 @1)))
1864 (match truth_valued_p
1865 (truth_not @0))
1866
1867 (match (logical_inverted_value @0)
1868 (truth_not @0))
1869 (match (logical_inverted_value @0)
1870 (bit_not truth_valued_p@0))
1871 (match (logical_inverted_value @0)
1872 (eq @0 integer_zerop))
1873 (match (logical_inverted_value @0)
1874 (ne truth_valued_p@0 integer_truep))
1875 (match (logical_inverted_value @0)
1876 (bit_xor truth_valued_p@0 integer_truep))
1877
1878 /* X & !X -> 0. */
1879 (simplify
1880 (bit_and:c @0 (logical_inverted_value @0))
1881 { build_zero_cst (type); })
1882 /* X | !X and X ^ !X -> 1, if X is truth-valued. */
1883 (for op (bit_ior bit_xor)
1884 (simplify
1885 (op:c truth_valued_p@0 (logical_inverted_value @0))
1886 { constant_boolean_node (true, type); }))
1887 /* X ==/!= !X is false/true. */
1888 (for op (eq ne)
1889 (simplify
1890 (op:c truth_valued_p@0 (logical_inverted_value @0))
1891 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
1892
1893 /* ~~x -> x */
1894 (simplify
1895 (bit_not (bit_not @0))
1896 @0)
1897
1898 (match zero_one_valued_p
1899 @0
1900 (if (INTEGRAL_TYPE_P (type) && tree_nonzero_bits (@0) == 1)))
1901 (match zero_one_valued_p
1902 truth_valued_p@0)
1903
1904 /* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 }. */
1905 (simplify
1906 (mult zero_one_valued_p@0 zero_one_valued_p@1)
1907 (if (INTEGRAL_TYPE_P (type))
1908 (bit_and @0 @1)))
1909
1910 /* Transform X & -Y into X * Y when Y is { 0 or 1 }. */
1911 (simplify
1912 (bit_and:c (convert? (negate zero_one_valued_p@0)) @1)
1913 (if (INTEGRAL_TYPE_P (type)
1914 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1915 && TREE_CODE (TREE_TYPE (@0)) != BOOLEAN_TYPE
1916 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
1917 (mult (convert @0) @1)))
1918
1919 /* Narrow integer multiplication by a zero_one_valued_p operand.
1920 Multiplication by [0,1] is guaranteed not to overflow. */
1921 (simplify
1922 (convert (mult@0 zero_one_valued_p@1 INTEGER_CST@2))
1923 (if (INTEGRAL_TYPE_P (type)
1924 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1925 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@0)))
1926 (mult (convert @1) (convert @2))))
1927
1928 /* (X << C) != 0 can be simplified to X, when X is zero_one_valued_p.
1929 Check that the shift is well-defined (C is less than TYPE_PRECISION)
1930 as some targets (such as x86's SSE) may return zero for larger C. */
1931 (simplify
1932 (ne (lshift zero_one_valued_p@0 INTEGER_CST@1) integer_zerop@2)
1933 (if (tree_fits_shwi_p (@1)
1934 && tree_to_shwi (@1) > 0
1935 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
1936 (convert @0)))
1937
1938 /* (X << C) == 0 can be simplified to X == 0, when X is zero_one_valued_p.
1939 Check that the shift is well-defined (C is less than TYPE_PRECISION)
1940 as some targets (such as x86's SSE) may return zero for larger C. */
1941 (simplify
1942 (eq (lshift zero_one_valued_p@0 INTEGER_CST@1) integer_zerop@2)
1943 (if (tree_fits_shwi_p (@1)
1944 && tree_to_shwi (@1) > 0
1945 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
1946 (eq @0 @2)))
1947
1948 /* Convert ~ (-A) to A - 1. */
1949 (simplify
1950 (bit_not (convert? (negate @0)))
1951 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1952 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1953 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
1954
1955 /* Convert - (~A) to A + 1. */
1956 (simplify
1957 (negate (nop_convert? (bit_not @0)))
1958 (plus (view_convert @0) { build_each_one_cst (type); }))
1959
1960 /* (a & b) ^ (a == b) -> !(a | b) */
1961 /* (a & b) == (a ^ b) -> !(a | b) */
1962 (for first_op (bit_xor eq)
1963 second_op (eq bit_xor)
1964 (simplify
1965 (first_op:c (bit_and:c truth_valued_p@0 truth_valued_p@1) (second_op:c @0 @1))
1966 (bit_not (bit_ior @0 @1))))
1967
1968 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1969 (simplify
1970 (bit_not (convert? (minus @0 integer_each_onep)))
1971 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1972 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1973 (convert (negate @0))))
1974 (simplify
1975 (bit_not (convert? (plus @0 integer_all_onesp)))
1976 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1977 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1978 (convert (negate @0))))
1979
1980 /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
1981 (simplify
1982 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1983 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1984 (convert (bit_xor @0 (bit_not @1)))))
1985 (simplify
1986 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1987 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1988 (convert (bit_xor @0 @1))))
1989
1990 /* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
1991 (simplify
1992 (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1)
1993 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1994 (bit_not (bit_xor (view_convert @0) @1))))
1995
1996 /* ~(a ^ b) is a == b for truth valued a and b. */
1997 (simplify
1998 (bit_not (bit_xor:s truth_valued_p@0 truth_valued_p@1))
1999 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2000 && TYPE_PRECISION (TREE_TYPE (@0)) == 1)
2001 (convert (eq @0 @1))))
2002
2003 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
2004 (simplify
2005 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
2006 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
2007
2008 /* Fold A - (A & B) into ~B & A. */
2009 (simplify
2010 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
2011 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
2012 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
2013 (convert (bit_and (bit_not @1) @0))))
2014
2015 /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
2016 (if (!canonicalize_math_p ())
2017 (for cmp (gt lt ge le)
2018 (simplify
2019 (mult (convert (cmp @0 @1)) @2)
2020 (cond (cmp @0 @1) @2 { build_zero_cst (type); }))))
2021
2022 /* For integral types with undefined overflow and C != 0 fold
2023 x * C EQ/NE y * C into x EQ/NE y. */
2024 (for cmp (eq ne)
2025 (simplify
2026 (cmp (mult:c @0 @1) (mult:c @2 @1))
2027 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2028 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2029 && tree_expr_nonzero_p (@1))
2030 (cmp @0 @2))))
2031
2032 /* For integral types with wrapping overflow and C odd fold
2033 x * C EQ/NE y * C into x EQ/NE y. */
2034 (for cmp (eq ne)
2035 (simplify
2036 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
2037 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2038 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
2039 && (TREE_INT_CST_LOW (@1) & 1) != 0)
2040 (cmp @0 @2))))
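/* This holds because multiplication by an odd constant is a bijection modulo
   2^prec: e.g. in wrapping 8-bit arithmetic, x * 3 == y * 3 implies
   (x - y) * 3 == 0, and since 3 is invertible mod 256 that forces x == y.  */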
2041
2042 /* For integral types with undefined overflow and C != 0 fold
2043 x * C RELOP y * C into:
2044
2045 x RELOP y for nonnegative C
2046 y RELOP x for negative C */
2047 (for cmp (lt gt le ge)
2048 (simplify
2049 (cmp (mult:c @0 @1) (mult:c @2 @1))
2050 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2051 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2052 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
2053 (cmp @0 @2)
2054 (if (TREE_CODE (@1) == INTEGER_CST
2055 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
2056 (cmp @2 @0))))))
2057
2058 /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
2059 (for cmp (le gt)
2060 icmp (gt le)
2061 (simplify
2062 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
2063 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2064 && TYPE_UNSIGNED (TREE_TYPE (@0))
2065 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
2066 && (wi::to_wide (@2)
2067 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
2068 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2069 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
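/* Worked example, assuming 32-bit unsigned X: X - 1U <= 0x7ffffffe holds
   exactly for X in [1, 0x7fffffff], i.e. exactly when (int) X > 0; for X == 0
   the subtraction wraps to 0xffffffff and the comparison fails, matching
   (int) 0 > 0 being false.  */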
2070
2071 /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
2072 (for cmp (simple_comparison)
2073 (simplify
2074 (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2)))
2075 (if (element_precision (@3) >= element_precision (@0)
2076 && types_match (@0, @1))
2077 (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
2078 (if (!TYPE_UNSIGNED (TREE_TYPE (@3)))
2079 (cmp @1 @0)
2080 (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1))
2081 (with
2082 {
2083 tree utype = unsigned_type_for (TREE_TYPE (@0));
2084 }
2085 (cmp (convert:utype @1) (convert:utype @0)))))
2086 (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2))))
2087 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3)))
2088 (cmp @0 @1)
2089 (with
2090 {
2091 tree utype = unsigned_type_for (TREE_TYPE (@0));
2092 }
2093 (cmp (convert:utype @0) (convert:utype @1)))))))))
2094
2095 /* X / C1 op C2 into a simple range test. */
2096 (for cmp (simple_comparison)
2097 (simplify
2098 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
2099 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2100 && integer_nonzerop (@1)
2101 && !TREE_OVERFLOW (@1)
2102 && !TREE_OVERFLOW (@2))
2103 (with { tree lo, hi; bool neg_overflow;
2104 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
2105 &neg_overflow); }
2106 (switch
2107 (if (code == LT_EXPR || code == GE_EXPR)
2108 (if (TREE_OVERFLOW (lo))
2109 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
2110 (if (code == LT_EXPR)
2111 (lt @0 { lo; })
2112 (ge @0 { lo; }))))
2113 (if (code == LE_EXPR || code == GT_EXPR)
2114 (if (TREE_OVERFLOW (hi))
2115 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
2116 (if (code == LE_EXPR)
2117 (le @0 { hi; })
2118 (gt @0 { hi; }))))
2119 (if (!lo && !hi)
2120 { build_int_cst (type, code == NE_EXPR); })
2121 (if (code == EQ_EXPR && !hi)
2122 (ge @0 { lo; }))
2123 (if (code == EQ_EXPR && !lo)
2124 (le @0 { hi; }))
2125 (if (code == NE_EXPR && !hi)
2126 (lt @0 { lo; }))
2127 (if (code == NE_EXPR && !lo)
2128 (gt @0 { hi; }))
2129 (if (GENERIC)
2130 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
2131 lo, hi); })
2132 (with
2133 {
2134 tree etype = range_check_type (TREE_TYPE (@0));
2135 if (etype)
2136 {
2137 hi = fold_convert (etype, hi);
2138 lo = fold_convert (etype, lo);
2139 hi = const_binop (MINUS_EXPR, etype, hi, lo);
2140 }
2141 }
2142 (if (etype && hi && !TREE_OVERFLOW (hi))
2143 (if (code == EQ_EXPR)
2144 (le (minus (convert:etype @0) { lo; }) { hi; })
2145 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
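/* For instance, with unsigned X the test X / 10 == 3 describes the range
   30 <= X && X <= 39, which the final branch above emits as the single
   unsigned comparison (X - 30) <= 9.  */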
2146
2147 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
2148 (for op (lt le ge gt)
2149 (simplify
2150 (op (plus:c @0 @2) (plus:c @1 @2))
2151 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2152 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2153 (op @0 @1))))
2154
2155 /* As a special case, X + C < Y + C is the same as (signed) X < (signed) Y
2156 when C is an unsigned integer constant with only the MSB set, and X and
2157 Y have types of equal or lower integer conversion rank than C's. */
2158 (for op (lt le ge gt)
2159 (simplify
2160 (op (plus @1 INTEGER_CST@0) (plus @2 @0))
2161 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2162 && TYPE_UNSIGNED (TREE_TYPE (@0))
2163 && wi::only_sign_bit_p (wi::to_wide (@0)))
2164 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2165 (op (convert:stype @1) (convert:stype @2))))))
2166
2167 /* For equality and subtraction, this is also true with wrapping overflow. */
2168 (for op (eq ne minus)
2169 (simplify
2170 (op (plus:c @0 @2) (plus:c @1 @2))
2171 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2172 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2173 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2174 (op @0 @1))))
2175
2176 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
2177 (for op (lt le ge gt)
2178 (simplify
2179 (op (minus @0 @2) (minus @1 @2))
2180 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2181 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2182 (op @0 @1))))
2183 /* For equality and subtraction, this is also true with wrapping overflow. */
2184 (for op (eq ne minus)
2185 (simplify
2186 (op (minus @0 @2) (minus @1 @2))
2187 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2188 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2189 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2190 (op @0 @1))))
2191 /* And for pointers... */
2192 (for op (simple_comparison)
2193 (simplify
2194 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
2195 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2196 (op @0 @1))))
2197 (simplify
2198 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
2199 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
2200 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2201 (pointer_diff @0 @1)))
2202
2203 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
2204 (for op (lt le ge gt)
2205 (simplify
2206 (op (minus @2 @0) (minus @2 @1))
2207 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2208 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2209 (op @1 @0))))
2210 /* For equality and subtraction, this is also true with wrapping overflow. */
2211 (for op (eq ne minus)
2212 (simplify
2213 (op (minus @2 @0) (minus @2 @1))
2214 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2215 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2216 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2217 (op @1 @0))))
2218 /* And for pointers... */
2219 (for op (simple_comparison)
2220 (simplify
2221 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
2222 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2223 (op @1 @0))))
2224 (simplify
2225 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
2226 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
2227 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
2228 (pointer_diff @1 @0)))
2229
2230 /* X + Y < Y is the same as X < 0 when there is no overflow. */
2231 (for op (lt le gt ge)
2232 (simplify
2233 (op:c (plus:c@2 @0 @1) @1)
2234 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2235 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2236 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
2237 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
2238 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
2239 /* For equality, this is also true with wrapping overflow. */
2240 (for op (eq ne)
2241 (simplify
2242 (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
2243 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2244 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2245 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2246 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
2247 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
2248 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
2249 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
2250 (simplify
2251 (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
2252 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
2253 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
2254 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
2255 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
2256
2257 /* X - Y < X is the same as Y > 0 when there is no overflow.
2258 For equality, this is also true with wrapping overflow. */
2259 (for op (simple_comparison)
2260 (simplify
2261 (op:c @0 (minus@2 @0 @1))
2262 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2263 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2264 || ((op == EQ_EXPR || op == NE_EXPR)
2265 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
2266 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
2267 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
2268
2269 /* Transform:
2270 (X / Y) == 0 -> X < Y if X, Y are unsigned.
2271 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */
2272 (for cmp (eq ne)
2273 ocmp (lt ge)
2274 (simplify
2275 (cmp (trunc_div @0 @1) integer_zerop)
2276 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
2277 /* Complex ==/!= is allowed, but not </>=. */
2278 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
2279 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
2280 (ocmp @0 @1))))
2281
2282 /* X == C - X can never be true if C is odd. */
2283 (for cmp (eq ne)
2284 (simplify
2285 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
2286 (if (TREE_INT_CST_LOW (@1) & 1)
2287 { constant_boolean_node (cmp == NE_EXPR, type); })))
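/* In wrapping arithmetic X == C - X would require 2 * X == C, and 2 * X is
   always even modulo a power of two, so it can never equal an odd C; e.g.
   x == 7 - x has no solution in any integer width.  */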
2288
2289 /* Arguments on which one can call get_nonzero_bits to get the bits
2290 possibly set. */
2291 (match with_possible_nonzero_bits
2292 INTEGER_CST@0)
2293 (match with_possible_nonzero_bits
2294 SSA_NAME@0
2295 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
2296 /* Slightly extended version, do not make it recursive to keep it cheap. */
2297 (match (with_possible_nonzero_bits2 @0)
2298 with_possible_nonzero_bits@0)
2299 (match (with_possible_nonzero_bits2 @0)
2300 (bit_and:c with_possible_nonzero_bits@0 @2))
2301
2302 /* Same for bits that are known to be set, but we do not have
2303 an equivalent to get_nonzero_bits yet. */
2304 (match (with_certain_nonzero_bits2 @0)
2305 INTEGER_CST@0)
2306 (match (with_certain_nonzero_bits2 @0)
2307 (bit_ior @1 INTEGER_CST@0))
2308
2309 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
2310 (for cmp (eq ne)
2311 (simplify
2312 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
2313 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
2314 { constant_boolean_node (cmp == NE_EXPR, type); })))
2315
2316 /* ((X inner_op C0) outer_op C1)
2317 With X being a tree where value_range has reasoned certain bits to always be
2318 zero throughout its computed value range,
2319 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
2320 where zero_mask has 1's for all bits that are sure to be 0 in X
2321 and 0's otherwise.
2322 if (inner_op == '^') C0 &= ~C1;
2323 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
2324 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
2325 */
2326 (for inner_op (bit_ior bit_xor)
2327 outer_op (bit_xor bit_ior)
2328 (simplify
2329 (outer_op
2330 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
2331 (with
2332 {
2333 bool fail = false;
2334 wide_int zero_mask_not;
2335 wide_int C0;
2336 wide_int cst_emit;
2337
2338 if (TREE_CODE (@2) == SSA_NAME)
2339 zero_mask_not = get_nonzero_bits (@2);
2340 else
2341 fail = true;
2342
2343 if (inner_op == BIT_XOR_EXPR)
2344 {
2345 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
2346 cst_emit = C0 | wi::to_wide (@1);
2347 }
2348 else
2349 {
2350 C0 = wi::to_wide (@0);
2351 cst_emit = C0 ^ wi::to_wide (@1);
2352 }
2353 }
2354 (if (!fail && (C0 & zero_mask_not) == 0)
2355 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
2356 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
2357 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
2358
2359 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
2360 (simplify
2361 (pointer_plus (pointer_plus:s @0 @1) @3)
2362 (pointer_plus @0 (plus @1 @3)))
2363 #if GENERIC
2364 (simplify
2365 (pointer_plus (convert:s (pointer_plus:s @0 @1)) @3)
2366 (convert:type (pointer_plus @0 (plus @1 @3))))
2367 #endif
2368
2369 /* Pattern match
2370 tem1 = (long) ptr1;
2371 tem2 = (long) ptr2;
2372 tem3 = tem2 - tem1;
2373 tem4 = (unsigned long) tem3;
2374 tem5 = ptr1 + tem4;
2375 and produce
2376 tem5 = ptr2; */
2377 (simplify
2378 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
2379 /* Conditionally look through a sign-changing conversion. */
2380 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
2381 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
2382 || (GENERIC && type == TREE_TYPE (@1))))
2383 @1))
2384 (simplify
2385 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
2386 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
2387 (convert @1)))
2388
2389 /* Pattern match
2390 tem = (sizetype) ptr;
2391 tem = tem & algn;
2392 tem = -tem;
2393 ... = ptr p+ tem;
2394 and produce the simpler form, easier to analyze with respect to alignment,
2395 ... = ptr & ~algn; */
2396 (simplify
2397 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
2398 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
2399 (bit_and @0 { algn; })))
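/* For example, with algn == 15 the matched sequence rounds ptr down to a
   16-byte boundary: ptr p+ -((sizetype) ptr & 15) is the same address as
   ptr & ~15, but the latter exposes the alignment directly.  */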
2400
2401 /* Try folding difference of addresses. */
2402 (simplify
2403 (minus (convert ADDR_EXPR@0) (convert @1))
2404 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2405 (with { poly_int64 diff; }
2406 (if (ptr_difference_const (@0, @1, &diff))
2407 { build_int_cst_type (type, diff); }))))
2408 (simplify
2409 (minus (convert @0) (convert ADDR_EXPR@1))
2410 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2411 (with { poly_int64 diff; }
2412 (if (ptr_difference_const (@0, @1, &diff))
2413 { build_int_cst_type (type, diff); }))))
2414 (simplify
2415 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
2416 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
2417 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
2418 (with { poly_int64 diff; }
2419 (if (ptr_difference_const (@0, @1, &diff))
2420 { build_int_cst_type (type, diff); }))))
2421 (simplify
2422 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
2423 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
2424 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
2425 (with { poly_int64 diff; }
2426 (if (ptr_difference_const (@0, @1, &diff))
2427 { build_int_cst_type (type, diff); }))))
2428
2429 /* (&a+b) - (&a[1] + c) -> sizeof(a[0]) + (b - c) */
2430 (simplify
2431 (pointer_diff (pointer_plus ADDR_EXPR@0 @1) (pointer_plus ADDR_EXPR@2 @3))
2432 (with { poly_int64 diff; }
2433 (if (ptr_difference_const (@0, @2, &diff))
2434 (plus { build_int_cst_type (type, diff); } (convert (minus @1 @3))))))
2435
2436 /* (&a+b) !=/== (&a[1] + c) -> sizeof(a[0]) + b !=/== c */
2437 (for neeq (ne eq)
2438 (simplify
2439 (neeq (pointer_plus ADDR_EXPR@0 @1) (pointer_plus ADDR_EXPR@2 @3))
2440 (with { poly_int64 diff; tree inner_type = TREE_TYPE (@1);}
2441 (if (ptr_difference_const (@0, @2, &diff))
2442 (neeq (plus { build_int_cst_type (inner_type, diff); } @1) @3)))))
2443
2444 /* Canonicalize (T *)(ptr - ptr-cst) to &MEM[ptr + -ptr-cst]. */
2445 (simplify
2446 (convert (pointer_diff @0 INTEGER_CST@1))
2447 (if (POINTER_TYPE_P (type))
2448 { build_fold_addr_expr_with_type
2449 (build2 (MEM_REF, char_type_node, @0,
2450 wide_int_to_tree (ptr_type_node, wi::neg (wi::to_wide (@1)))),
2451 type); }))
2452
2453 /* If arg0 is derived from the address of an object or function, we may
2454 be able to fold this expression using the object or function's
2455 alignment. */
2456 (simplify
2457 (bit_and (convert? @0) INTEGER_CST@1)
2458 (if (POINTER_TYPE_P (TREE_TYPE (@0))
2459 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
2460 (with
2461 {
2462 unsigned int align;
2463 unsigned HOST_WIDE_INT bitpos;
2464 get_pointer_alignment_1 (@0, &align, &bitpos);
2465 }
2466 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
2467 { wide_int_to_tree (type, (wi::to_wide (@1)
2468 & (bitpos / BITS_PER_UNIT))); }))))
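/* As an example, if get_pointer_alignment_1 reports @0 as 16-byte aligned
   with a byte offset of 4 (align = 128 bits, bitpos = 32), then
   ((uintptr_t) @0) & 7 folds to 7 & 4 == 4, since the low four bits of the
   address are known exactly.  */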
2469
2470 (match min_value
2471 INTEGER_CST
2472 (if (INTEGRAL_TYPE_P (type)
2473 && wi::eq_p (wi::to_wide (t), wi::min_value (type)))))
2474
2475 (match max_value
2476 INTEGER_CST
2477 (if (INTEGRAL_TYPE_P (type)
2478 && wi::eq_p (wi::to_wide (t), wi::max_value (type)))))
2479
2480 /* x > y && x != XXX_MIN --> x > y
2481 x > y && x == XXX_MIN --> false. */
2482 (for eqne (eq ne)
2483 (simplify
2484 (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value))
2485 (switch
2486 (if (eqne == EQ_EXPR)
2487 { constant_boolean_node (false, type); })
2488 (if (eqne == NE_EXPR)
2489 @2)
2490 )))
2491
2492 /* x < y && x != XXX_MAX --> x < y
2493 x < y && x == XXX_MAX --> false. */
2494 (for eqne (eq ne)
2495 (simplify
2496 (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value))
2497 (switch
2498 (if (eqne == EQ_EXPR)
2499 { constant_boolean_node (false, type); })
2500 (if (eqne == NE_EXPR)
2501 @2)
2502 )))
2503
2504 /* x <= y && x == XXX_MIN --> x == XXX_MIN. */
2505 (simplify
2506 (bit_and:c (le:c @0 @1) (eq@2 @0 min_value))
2507 @2)
2508
2509 /* x >= y && x == XXX_MAX --> x == XXX_MAX. */
2510 (simplify
2511 (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value))
2512 @2)
2513
2514 /* x > y || x != XXX_MIN --> x != XXX_MIN. */
2515 (simplify
2516 (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value))
2517 @2)
2518
2519 /* x <= y || x != XXX_MIN --> true. */
2520 (simplify
2521 (bit_ior:c (le:c @0 @1) (ne @0 min_value))
2522 { constant_boolean_node (true, type); })
2523
2524 /* x <= y || x == XXX_MIN --> x <= y. */
2525 (simplify
2526 (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value))
2527 @2)
2528
2529 /* x < y || x != XXX_MAX --> x != XXX_MAX. */
2530 (simplify
2531 (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value))
2532 @2)
2533
2534 /* x >= y || x != XXX_MAX --> true
2535 x >= y || x == XXX_MAX --> x >= y. */
2536 (for eqne (eq ne)
2537 (simplify
2538 (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value))
2539 (switch
2540 (if (eqne == EQ_EXPR)
2541 @2)
2542 (if (eqne == NE_EXPR)
2543 { constant_boolean_node (true, type); }))))
2544
2545 /* y == XXX_MIN || x < y --> x <= y - 1 */
2546 (simplify
2547 (bit_ior:c (eq:s @1 min_value) (lt:cs @0 @1))
2548 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2549 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2550 (le @0 (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))
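/* Sanity check of the rewrite, assuming unsigned wrapping arithmetic: if
   y == 0 (the minimum) the right-hand side x <= y - 1 wraps to x <= UINT_MAX,
   which is always true, matching the left-hand side; for y > 0, x < y and
   x <= y - 1 are the same integer condition.  */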
2551
2552 /* y != XXX_MIN && x >= y --> x > y - 1 */
2553 (simplify
2554 (bit_and:c (ne:s @1 min_value) (ge:cs @0 @1))
2555 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2556 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2557 (gt @0 (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))
2558
2559 /* Convert (X == CST1) && (X OP2 CST2) to a known value
2560 based on CST1 OP2 CST2. Similarly for (X != CST1). */
2561
2562 (for code1 (eq ne)
2563 (for code2 (eq ne lt gt le ge)
2564 (simplify
2565 (bit_and:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2566 (with
2567 {
2568 int cmp = tree_int_cst_compare (@1, @2);
2569 bool val;
2570 switch (code2)
2571 {
2572 case EQ_EXPR: val = (cmp == 0); break;
2573 case NE_EXPR: val = (cmp != 0); break;
2574 case LT_EXPR: val = (cmp < 0); break;
2575 case GT_EXPR: val = (cmp > 0); break;
2576 case LE_EXPR: val = (cmp <= 0); break;
2577 case GE_EXPR: val = (cmp >= 0); break;
2578 default: gcc_unreachable ();
2579 }
2580 }
2581 (switch
2582 (if (code1 == EQ_EXPR && val) @3)
2583 (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); })
2584 (if (code1 == NE_EXPR && !val) @4))))))
2585
2586 /* Convert (X OP1 CST1) && (X OP2 CST2). */
2587
2588 (for code1 (lt le gt ge)
2589 (for code2 (lt le gt ge)
2590 (simplify
2591 (bit_and (code1:c@3 @0 INTEGER_CST@1) (code2:c@4 @0 INTEGER_CST@2))
2592 (with
2593 {
2594 int cmp = tree_int_cst_compare (@1, @2);
2595 }
2596 (switch
2597 /* Choose the more restrictive of two < or <= comparisons. */
2598 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2599 && (code2 == LT_EXPR || code2 == LE_EXPR))
2600 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2601 @3
2602 @4))
2603 /* Likewise choose the more restrictive of two > or >= comparisons. */
2604 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2605 && (code2 == GT_EXPR || code2 == GE_EXPR))
2606 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2607 @3
2608 @4))
2609 /* Check for singleton ranges. */
2610 (if (cmp == 0
2611 && ((code1 == LE_EXPR && code2 == GE_EXPR)
2612 || (code1 == GE_EXPR && code2 == LE_EXPR)))
2613 (eq @0 @1))
2614 /* Check for disjoint ranges. */
2615 (if (cmp <= 0
2616 && (code1 == LT_EXPR || code1 == LE_EXPR)
2617 && (code2 == GT_EXPR || code2 == GE_EXPR))
2618 { constant_boolean_node (false, type); })
2619 (if (cmp >= 0
2620 && (code1 == GT_EXPR || code1 == GE_EXPR)
2621 && (code2 == LT_EXPR || code2 == LE_EXPR))
2622 { constant_boolean_node (false, type); })
2623 )))))
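/* Examples of the cases handled above:
     (x < 5)  && (x < 10) -> x < 5      (more restrictive bound)
     (x <= 3) && (x >= 3) -> x == 3     (singleton range)
     (x < 3)  && (x > 5)  -> false      (disjoint ranges)  */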
2624
2625 /* Convert (X == CST1) || (X OP2 CST2) to a known value
2626 based on CST1 OP2 CST2. Similarly for (X != CST1). */
2627
2628 (for code1 (eq ne)
2629 (for code2 (eq ne lt gt le ge)
2630 (simplify
2631 (bit_ior:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2632 (with
2633 {
2634 int cmp = tree_int_cst_compare (@1, @2);
2635 bool val;
2636 switch (code2)
2637 {
2638 case EQ_EXPR: val = (cmp == 0); break;
2639 case NE_EXPR: val = (cmp != 0); break;
2640 case LT_EXPR: val = (cmp < 0); break;
2641 case GT_EXPR: val = (cmp > 0); break;
2642 case LE_EXPR: val = (cmp <= 0); break;
2643 case GE_EXPR: val = (cmp >= 0); break;
2644 default: gcc_unreachable ();
2645 }
2646 }
2647 (switch
2648 (if (code1 == EQ_EXPR && val) @4)
2649 (if (code1 == NE_EXPR && val) { constant_boolean_node (true, type); })
2650 (if (code1 == NE_EXPR && !val) @3))))))
2651
2652 /* Convert (X OP1 CST1) || (X OP2 CST2). */
2653
2654 (for code1 (lt le gt ge)
2655 (for code2 (lt le gt ge)
2656 (simplify
2657 (bit_ior (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2658 (with
2659 {
2660 int cmp = tree_int_cst_compare (@1, @2);
2661 }
2662 (switch
2663 /* Choose the more restrictive of two < or <= comparisons. */
2664 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2665 && (code2 == LT_EXPR || code2 == LE_EXPR))
2666 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2667 @4
2668 @3))
2669 /* Likewise choose the more restrictive of two > or >= comparisons. */
2670 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2671 && (code2 == GT_EXPR || code2 == GE_EXPR))
2672 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2673 @4
2674 @3))
2675 /* Check for singleton ranges. */
2676 (if (cmp == 0
2677 && ((code1 == LT_EXPR && code2 == GT_EXPR)
2678 || (code1 == GT_EXPR && code2 == LT_EXPR)))
2679 (ne @0 @2))
2680 /* Check for disjoint ranges. */
2681 (if (cmp >= 0
2682 && (code1 == LT_EXPR || code1 == LE_EXPR)
2683 && (code2 == GT_EXPR || code2 == GE_EXPR))
2684 { constant_boolean_node (true, type); })
2685 (if (cmp <= 0
2686 && (code1 == GT_EXPR || code1 == GE_EXPR)
2687 && (code2 == LT_EXPR || code2 == LE_EXPR))
2688 { constant_boolean_node (true, type); })
2689 )))))
2690
2691 /* We can't reassociate at all for saturating types. */
2692 (if (!TYPE_SATURATING (type))
2693
2694 /* Contract negates. */
2695 /* A + (-B) -> A - B */
2696 (simplify
2697 (plus:c @0 (convert? (negate @1)))
2698 /* Apply STRIP_NOPS on the negate. */
2699 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2700 && !TYPE_OVERFLOW_SANITIZED (type))
2701 (with
2702 {
2703 tree t1 = type;
2704 if (INTEGRAL_TYPE_P (type)
2705 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2706 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2707 }
2708 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
2709 /* A - (-B) -> A + B */
2710 (simplify
2711 (minus @0 (convert? (negate @1)))
2712 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2713 && !TYPE_OVERFLOW_SANITIZED (type))
2714 (with
2715 {
2716 tree t1 = type;
2717 if (INTEGRAL_TYPE_P (type)
2718 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2719 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2720 }
2721 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
2722 /* -(T)(-A) -> (T)A
2723 Sign-extension is ok except for INT_MIN, which thankfully cannot
2724 happen without overflow. */
2725 (simplify
2726 (negate (convert (negate @1)))
2727 (if (INTEGRAL_TYPE_P (type)
2728 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
2729 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
2730 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2731 && !TYPE_OVERFLOW_SANITIZED (type)
2732 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2733 (convert @1)))
2734 (simplify
2735 (negate (convert negate_expr_p@1))
2736 (if (SCALAR_FLOAT_TYPE_P (type)
2737 && ((DECIMAL_FLOAT_TYPE_P (type)
2738 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
2739 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
2740 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
2741 (convert (negate @1))))
2742 (simplify
2743 (negate (nop_convert? (negate @1)))
2744 (if (!TYPE_OVERFLOW_SANITIZED (type)
2745 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2746 (view_convert @1)))
2747
2748 /* We can't reassociate floating-point unless -fassociative-math is in
2749 effect, nor fixed-point plus or minus, because of saturation to +-Inf. */
2750 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
2751 && !FIXED_POINT_TYPE_P (type))
2752
2753 /* Match patterns that allow contracting a plus-minus pair
2754 irrespective of overflow issues. */
2755 /* (A +- B) - A -> +- B */
2756 /* (A +- B) -+ B -> A */
2757 /* A - (A +- B) -> -+ B */
2758 /* A +- (B -+ A) -> +- B */
2759 (simplify
2760 (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0)
2761 (view_convert @1))
2762 (simplify
2763 (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0)
2764 (if (!ANY_INTEGRAL_TYPE_P (type)
2765 || TYPE_OVERFLOW_WRAPS (type))
2766 (negate (view_convert @1))
2767 (view_convert (negate @1))))
2768 (simplify
2769 (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1)
2770 (view_convert @0))
2771 (simplify
2772 (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1)))
2773 (if (!ANY_INTEGRAL_TYPE_P (type)
2774 || TYPE_OVERFLOW_WRAPS (type))
2775 (negate (view_convert @1))
2776 (view_convert (negate @1))))
2777 (simplify
2778 (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1)))
2779 (view_convert @1))
2780 /* (A +- B) + (C - A) -> C +- B */
2781 /* (A + B) - (A - C) -> B + C */
2782 /* More cases are handled with comparisons. */
2783 (simplify
2784 (plus:c (plus:c @0 @1) (minus @2 @0))
2785 (plus @2 @1))
2786 (simplify
2787 (plus:c (minus @0 @1) (minus @2 @0))
2788 (minus @2 @1))
2789 (simplify
2790 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
2791 (if (TYPE_OVERFLOW_UNDEFINED (type)
2792 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
2793 (pointer_diff @2 @1)))
2794 (simplify
2795 (minus (plus:c @0 @1) (minus @0 @2))
2796 (plus @1 @2))
2797
2798 /* (A +- CST1) +- CST2 -> A + CST3
2799 Use view_convert because it is safe for vectors and equivalent for
2800 scalars. */
2801 (for outer_op (plus minus)
2802 (for inner_op (plus minus)
2803 neg_inner_op (minus plus)
2804 (simplify
2805 (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1))
2806 CONSTANT_CLASS_P@2)
2807 /* If one of the types wraps, use that one. */
2808 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2809 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2810 forever if something doesn't simplify into a constant. */
2811 (if (!CONSTANT_CLASS_P (@0))
2812 (if (outer_op == PLUS_EXPR)
2813 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
2814 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
2815 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2816 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2817 (if (outer_op == PLUS_EXPR)
2818 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
2819 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
2820 /* If the constant operation overflows we cannot do the transform
2821 directly as we would introduce undefined overflow, for example
2822 with (a - 1) + INT_MIN. */
2823 (if (types_match (type, @0))
2824 (with { tree cst = const_binop (outer_op == inner_op
2825 ? PLUS_EXPR : MINUS_EXPR,
2826 type, @1, @2); }
2827 (if (cst && !TREE_OVERFLOW (cst))
2828 (inner_op @0 { cst; } )
2829 /* X+INT_MAX+1 is X-INT_MIN. */
2830 (if (INTEGRAL_TYPE_P (type) && cst
2831 && wi::to_wide (cst) == wi::min_value (type))
2832 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
2833 /* Last resort, use some unsigned type. */
2834 (with { tree utype = unsigned_type_for (type); }
2835 (if (utype)
2836 (view_convert (inner_op
2837 (view_convert:utype @0)
2838 (view_convert:utype
2839 { drop_tree_overflow (cst); }))))))))))))))
2840
2841 /* (CST1 - A) +- CST2 -> CST3 - A */
2842 (for outer_op (plus minus)
2843 (simplify
2844 (outer_op (nop_convert? (minus CONSTANT_CLASS_P@1 @0)) CONSTANT_CLASS_P@2)
2845 /* If one of the types wraps, use that one. */
2846 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2847 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2848 forever if something doesn't simplify into a constant. */
2849 (if (!CONSTANT_CLASS_P (@0))
2850 (minus (outer_op (view_convert @1) @2) (view_convert @0)))
2851 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2852 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2853 (view_convert (minus (outer_op @1 (view_convert @2)) @0))
2854 (if (types_match (type, @0))
2855 (with { tree cst = const_binop (outer_op, type, @1, @2); }
2856 (if (cst && !TREE_OVERFLOW (cst))
2857 (minus { cst; } @0))))))))
2858
2859 /* CST1 - (CST2 - A) -> CST3 + A
2860 Use view_convert because it is safe for vectors and equivalent for
2861 scalars. */
2862 (simplify
2863 (minus CONSTANT_CLASS_P@1 (nop_convert? (minus CONSTANT_CLASS_P@2 @0)))
2864 /* If one of the types wraps, use that one. */
2865 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2866 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2867 forever if something doesn't simplify into a constant. */
2868 (if (!CONSTANT_CLASS_P (@0))
2869 (plus (view_convert @0) (minus @1 (view_convert @2))))
2870 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2871 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2872 (view_convert (plus @0 (minus (view_convert @1) @2)))
2873 (if (types_match (type, @0))
2874 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
2875 (if (cst && !TREE_OVERFLOW (cst))
2876 (plus { cst; } @0)))))))
2877
2878 /* ((T)(A)) + CST -> (T)(A + CST) */
2879 #if GIMPLE
2880 (simplify
2881 (plus (convert:s SSA_NAME@0) INTEGER_CST@1)
2882 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2883 && TREE_CODE (type) == INTEGER_TYPE
2884 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
2885 && int_fits_type_p (@1, TREE_TYPE (@0)))
2886 /* Perform binary operation inside the cast if the constant fits
2887 and (A + CST)'s range does not overflow. */
2888 (with
2889 {
2890 wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
2891 max_ovf = wi::OVF_OVERFLOW;
2892 tree inner_type = TREE_TYPE (@0);
2893
2894 wide_int w1
2895 = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
2896 TYPE_SIGN (inner_type));
2897
2898 value_range vr;
2899 if (get_global_range_query ()->range_of_expr (vr, @0)
2900 && vr.kind () == VR_RANGE)
2901 {
2902 wide_int wmin0 = vr.lower_bound ();
2903 wide_int wmax0 = vr.upper_bound ();
2904 wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
2905 wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
2906 }
2907 }
2908 (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
2909 (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
2910 )))
2911 #endif
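/* For example, assuming value-range information shows an unsigned char a to
   lie in [0, 100], then (int) a + 5 can be rewritten as (int) (a + 5): 5 fits
   in unsigned char and adding it cannot overflow anywhere in [0, 100].  */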
2912
2913 /* ((T)(A + CST1)) + CST2 -> (T)(A) + (T)CST1 + CST2 */
2914 #if GIMPLE
2915 (for op (plus minus)
2916 (simplify
2917 (plus (convert:s (op:s @0 INTEGER_CST@1)) INTEGER_CST@2)
2918 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2919 && TREE_CODE (type) == INTEGER_TYPE
2920 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
2921 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2922 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
2923 && TYPE_OVERFLOW_WRAPS (type))
2924 (plus (convert @0) (op @2 (convert @1))))))
2925 #endif
2926
2927 /* (T)(A) +- (T)(B) -> (T)(A +- B) only when (A +- B) could be simplified
2928 to a simple value. */
2929 (for op (plus minus)
2930 (simplify
2931 (op (convert @0) (convert @1))
2932 (if (INTEGRAL_TYPE_P (type)
2933 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2934 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2935 && types_match (TREE_TYPE (@0), TREE_TYPE (@1))
2936 && !TYPE_OVERFLOW_TRAPS (type)
2937 && !TYPE_OVERFLOW_SANITIZED (type))
2938 (convert (op! @0 @1)))))
2939
2940 /* ~A + A -> -1 */
2941 (simplify
2942 (plus:c (bit_not @0) @0)
2943 (if (!TYPE_OVERFLOW_TRAPS (type))
2944 { build_all_ones_cst (type); }))
2945
2946 /* ~A + 1 -> -A */
2947 (simplify
2948 (plus (convert? (bit_not @0)) integer_each_onep)
2949 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2950 (negate (convert @0))))
2951
2952 /* -A - 1 -> ~A */
2953 (simplify
2954 (minus (convert? (negate @0)) integer_each_onep)
2955 (if (!TYPE_OVERFLOW_TRAPS (type)
2956 && TREE_CODE (type) != COMPLEX_TYPE
2957 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
2958 (bit_not (convert @0))))
2959
2960 /* -1 - A -> ~A */
2961 (simplify
2962 (minus integer_all_onesp @0)
2963 (if (TREE_CODE (type) != COMPLEX_TYPE)
2964 (bit_not @0)))
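/* The four folds above are all consequences of the two's-complement identity
   ~a == -a - 1 (equivalently -a == ~a + 1); for instance ~a + a == -1 and
   ~a + 1 == -a follow directly from it.  */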
2965
2966 /* (T)(P + A) - (T)P -> (T) A */
2967 (simplify
2968 (minus (convert (plus:c @@0 @1))
2969 (convert? @0))
2970 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2971 /* For integer types, if A has a smaller type
2972 than T the result depends on the possible
2973 overflow in P + A.
2974 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2975 However, if an overflow in P + A would cause
2976 undefined behavior, we can assume that there
2977 is no overflow. */
2978 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2979 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2980 (convert @1)))
2981 (simplify
2982 (minus (convert (pointer_plus @@0 @1))
2983 (convert @0))
2984 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2985 /* For pointer types, if the conversion of A to the
2986 final type requires a sign- or zero-extension,
2987 then we have to punt - it is not defined which
2988 one is correct. */
2989 || (POINTER_TYPE_P (TREE_TYPE (@0))
2990 && TREE_CODE (@1) == INTEGER_CST
2991 && tree_int_cst_sign_bit (@1) == 0))
2992 (convert @1)))
2993 (simplify
2994 (pointer_diff (pointer_plus @@0 @1) @0)
2995 /* The second argument of pointer_plus must be interpreted as signed, and
2996 thus sign-extended if necessary. */
2997 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2998 /* Use view_convert instead of convert here: the second arg of
2999 POINTER_PLUS_EXPR is unsigned even when we need to consider it as
3000 signed, and we don't want to diagnose overflow here. */
3001 (convert (view_convert:stype @1))))
3002
3003 /* (T)P - (T)(P + A) -> -(T) A */
3004 (simplify
3005 (minus (convert? @0)
3006 (convert (plus:c @@0 @1)))
3007 (if (INTEGRAL_TYPE_P (type)
3008 && TYPE_OVERFLOW_UNDEFINED (type)
3009 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
3010 (with { tree utype = unsigned_type_for (type); }
3011 (convert (negate (convert:utype @1))))
3012 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3013 /* For integer types, if A has a smaller type
3014 than T the result depends on the possible
3015 overflow in P + A.
3016 E.g. T=size_t, A=(unsigned)4294967295, P>0.
3017 However, if an overflow in P + A would cause
3018 undefined behavior, we can assume that there
3019 is no overflow. */
3020 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3021 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
3022 (negate (convert @1)))))
3023 (simplify
3024 (minus (convert @0)
3025 (convert (pointer_plus @@0 @1)))
3026 (if (INTEGRAL_TYPE_P (type)
3027 && TYPE_OVERFLOW_UNDEFINED (type)
3028 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
3029 (with { tree utype = unsigned_type_for (type); }
3030 (convert (negate (convert:utype @1))))
3031 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3032 /* For pointer types, if the conversion of A to the
3033 final type requires a sign- or zero-extension,
3034 then we have to punt - it is not defined which
3035 one is correct. */
3036 || (POINTER_TYPE_P (TREE_TYPE (@0))
3037 && TREE_CODE (@1) == INTEGER_CST
3038 && tree_int_cst_sign_bit (@1) == 0))
3039 (negate (convert @1)))))
3040 (simplify
3041 (pointer_diff @0 (pointer_plus @@0 @1))
3042 /* The second argument of pointer_plus must be interpreted as signed, and
3043 thus sign-extended if necessary. */
3044 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
3045 /* Use view_convert instead of convert here: the second arg of
3046 POINTER_PLUS_EXPR is unsigned even when we need to consider it as
3047 signed, and we don't want to diagnose overflow here. */
3048 (negate (convert (view_convert:stype @1)))))
3049
3050 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
3051 (simplify
3052 (minus (convert (plus:c @@0 @1))
3053 (convert (plus:c @0 @2)))
3054 (if (INTEGRAL_TYPE_P (type)
3055 && TYPE_OVERFLOW_UNDEFINED (type)
3056 && element_precision (type) <= element_precision (TREE_TYPE (@1))
3057 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
3058 (with { tree utype = unsigned_type_for (type); }
3059 (convert (minus (convert:utype @1) (convert:utype @2))))
3060 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
3061 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
3062 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
3063 /* For integer types, if A has a smaller type
3064 than T the result depends on the possible
3065 overflow in P + A.
3066 E.g. T=size_t, A=(unsigned)4294967295, P>0.
3067 However, if an overflow in P + A would cause
3068 undefined behavior, we can assume that there
3069 is no overflow. */
3070 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
3071 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
3072 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
3073 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
3074 (minus (convert @1) (convert @2)))))
3075 (simplify
3076 (minus (convert (pointer_plus @@0 @1))
3077 (convert (pointer_plus @0 @2)))
3078 (if (INTEGRAL_TYPE_P (type)
3079 && TYPE_OVERFLOW_UNDEFINED (type)
3080 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
3081 (with { tree utype = unsigned_type_for (type); }
3082 (convert (minus (convert:utype @1) (convert:utype @2))))
3083 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
3084 /* For pointer types, if the conversion of A to the
3085 final type requires a sign- or zero-extension,
3086 then we have to punt - it is not defined which
3087 one is correct. */
3088 || (POINTER_TYPE_P (TREE_TYPE (@0))
3089 && TREE_CODE (@1) == INTEGER_CST
3090 && tree_int_cst_sign_bit (@1) == 0
3091 && TREE_CODE (@2) == INTEGER_CST
3092 && tree_int_cst_sign_bit (@2) == 0))
3093 (minus (convert @1) (convert @2)))))
3094 (simplify
3095 (pointer_diff (pointer_plus @0 @2) (pointer_plus @1 @2))
3096 (pointer_diff @0 @1))
3097 (simplify
3098 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
3099 /* The second argument of pointer_plus must be interpreted as signed, and
3100 thus sign-extended if necessary. */
3101 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
3102 /* Use view_convert instead of convert here: the second arg of
3103 POINTER_PLUS_EXPR is unsigned even when we need to consider it as
3104 signed, and we don't want to diagnose overflow here. */
3105 (minus (convert (view_convert:stype @1))
3106 (convert (view_convert:stype @2)))))))
3107
3108 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
3109 Modeled after fold_plusminus_mult_expr. */
3110 (if (!TYPE_SATURATING (type)
3111 && (!FLOAT_TYPE_P (type) || flag_associative_math))
3112 (for plusminus (plus minus)
3113 (simplify
3114 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
3115 (if (!ANY_INTEGRAL_TYPE_P (type)
3116 || TYPE_OVERFLOW_WRAPS (type)
3117 || (INTEGRAL_TYPE_P (type)
3118 && tree_expr_nonzero_p (@0)
3119 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
3120 (if (single_use (@3) || single_use (@4))
3121 /* If @1 +- @2 is constant require a hard single-use on either
3122 original operand (but not on both). */
3123 (mult (plusminus @1 @2) @0)
3124 (mult! (plusminus @1 @2) @0)
3125 )))
3126 /* We cannot generate constant 1 for fract. */
3127 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
3128 (simplify
3129 (plusminus @0 (mult:c@3 @0 @2))
3130 (if ((!ANY_INTEGRAL_TYPE_P (type)
3131 || TYPE_OVERFLOW_WRAPS (type)
3132 /* For @0 + @0*@2 this transformation would introduce UB
3133 (where there was none before) for @0 in [-1,0] and @2 max.
3134 For @0 - @0*@2 this transformation would introduce UB
3135 for @0 0 and @2 in [min,min+1] or @0 -1 and @2 min+1. */
3136 || (INTEGRAL_TYPE_P (type)
3137 && ((tree_expr_nonzero_p (@0)
3138 && expr_not_equal_to (@0,
3139 wi::minus_one (TYPE_PRECISION (type))))
3140 || (plusminus == PLUS_EXPR
3141 ? expr_not_equal_to (@2,
3142 wi::max_value (TYPE_PRECISION (type), SIGNED))
3143 /* Let's ignore the @0 -1 and @2 min case. */
3144 : (expr_not_equal_to (@2,
3145 wi::min_value (TYPE_PRECISION (type), SIGNED))
3146 && expr_not_equal_to (@2,
3147 wi::min_value (TYPE_PRECISION (type), SIGNED)
3148 + 1))))))
3149 && single_use (@3))
3150 (mult (plusminus { build_one_cst (type); } @2) @0)))
3151 (simplify
3152 (plusminus (mult:c@3 @0 @2) @0)
3153 (if ((!ANY_INTEGRAL_TYPE_P (type)
3154 || TYPE_OVERFLOW_WRAPS (type)
3155 /* For @0*@2 + @0 this transformation would introduce UB
3156 (where there was none before) for @0 in [-1,0] and @2 max.
3157 For @0*@2 - @0 this transformation would introduce UB
3158 for @0 0 and @2 min. */
3159 || (INTEGRAL_TYPE_P (type)
3160 && ((tree_expr_nonzero_p (@0)
3161 && (plusminus == MINUS_EXPR
3162 || expr_not_equal_to (@0,
3163 wi::minus_one (TYPE_PRECISION (type)))))
3164 || expr_not_equal_to (@2,
3165 (plusminus == PLUS_EXPR
3166 ? wi::max_value (TYPE_PRECISION (type), SIGNED)
3167 : wi::min_value (TYPE_PRECISION (type), SIGNED))))))
3168 && single_use (@3))
3169 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
3170
3171 #if GIMPLE
3172 /* Canonicalize X + (X << C) into X * (1 + (1 << C)) and
3173 (X << C1) + (X << C2) into X * ((1 << C1) + (1 << C2)). */
3174 (simplify
3175 (plus:c @0 (lshift:s @0 INTEGER_CST@1))
3176 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3177 && tree_fits_uhwi_p (@1)
3178 && tree_to_uhwi (@1) < element_precision (type)
3179 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3180 || optab_handler (smul_optab,
3181 TYPE_MODE (type)) != CODE_FOR_nothing))
3182 (with { tree t = type;
3183 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
3184 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1),
3185 element_precision (type));
3186 w += 1;
3187 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
3188 : t, w);
3189 cst = build_uniform_cst (t, cst); }
3190 (convert (mult (convert:t @0) { cst; })))))
3191 (simplify
3192 (plus (lshift:s @0 INTEGER_CST@1) (lshift:s @0 INTEGER_CST@2))
3193 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3194 && tree_fits_uhwi_p (@1)
3195 && tree_to_uhwi (@1) < element_precision (type)
3196 && tree_fits_uhwi_p (@2)
3197 && tree_to_uhwi (@2) < element_precision (type)
3198 && (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3199 || optab_handler (smul_optab,
3200 TYPE_MODE (type)) != CODE_FOR_nothing))
3201 (with { tree t = type;
3202 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
3203 unsigned int prec = element_precision (type);
3204 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1), prec);
3205 w += wi::set_bit_in_zero (tree_to_uhwi (@2), prec);
3206 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
3207 : t, w);
3208 cst = build_uniform_cst (t, cst); }
3209 (convert (mult (convert:t @0) { cst; })))))
3210 #endif
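
/* Editorial illustration, not part of the pattern source: a minimal C sketch
   of the two shift-add canonicalizations above, assuming 32-bit unsigned
   arithmetic so that wrapping is well defined; the function names are made
   up for the example.

     #include <assert.h>
     #include <stdint.h>

     // X + (X << C) becomes X * (1 + (1 << C)); here C = 3, so the factor is 9.
     static uint32_t add_shift (uint32_t x) { return x + (x << 3); }
     // (X << C1) + (X << C2) becomes X * ((1 << C1) + (1 << C2)); 2 + 8 = 10.
     static uint32_t two_shifts (uint32_t x) { return (x << 1) + (x << 3); }

     int main (void)
     {
       for (uint32_t x = 0; x < 100000; x++)
         {
           assert (add_shift (x) == x * 9u);
           assert (two_shifts (x) == x * 10u);
         }
       return 0;
     }
*/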
3211
3212 /* Canonicalize (X*C1)|(X*C2) and (X*C1)^(X*C2) to (C1+C2)*X when
3213 tree_nonzero_bits allows IOR and XOR to be treated like PLUS.
3214 Likewise, handle (X<<C3) and X as legitimate variants of X*C. */
3215 (for op (bit_ior bit_xor)
3216 (simplify
3217 (op (mult:s@0 @1 INTEGER_CST@2)
3218 (mult:s@3 @1 INTEGER_CST@4))
3219 (if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)
3220 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@3)) == 0)
3221 (mult @1
3222 { wide_int_to_tree (type, wi::to_wide (@2) + wi::to_wide (@4)); })))
3223 (simplify
3224 (op:c (mult:s@0 @1 INTEGER_CST@2)
3225 (lshift:s@3 @1 INTEGER_CST@4))
3226 (if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)
3227 && tree_int_cst_sgn (@4) > 0
3228 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@3)) == 0)
3229 (with { wide_int wone = wi::one (TYPE_PRECISION (type));
3230 wide_int c = wi::add (wi::to_wide (@2),
3231 wi::lshift (wone, wi::to_wide (@4))); }
3232 (mult @1 { wide_int_to_tree (type, c); }))))
3233 (simplify
3234 (op:c (mult:s@0 @1 INTEGER_CST@2)
3235 @1)
3236 (if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)
3237 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@1)) == 0)
3238 (mult @1
3239 { wide_int_to_tree (type,
3240 wi::add (wi::to_wide (@2), 1)); })))
3241 (simplify
3242 (op (lshift:s@0 @1 INTEGER_CST@2)
3243 (lshift:s@3 @1 INTEGER_CST@4))
3244 (if (INTEGRAL_TYPE_P (type)
3245 && tree_int_cst_sgn (@2) > 0
3246 && tree_int_cst_sgn (@4) > 0
3247 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@3)) == 0)
3248 (with { tree t = type;
3249 if (!TYPE_OVERFLOW_WRAPS (t))
3250 t = unsigned_type_for (t);
3251 wide_int wone = wi::one (TYPE_PRECISION (t));
3252 wide_int c = wi::add (wi::lshift (wone, wi::to_wide (@2)),
3253 wi::lshift (wone, wi::to_wide (@4))); }
3254 (convert (mult:t (convert:t @1) { wide_int_to_tree (t,c); })))))
3255 (simplify
3256 (op:c (lshift:s@0 @1 INTEGER_CST@2)
3257 @1)
3258 (if (INTEGRAL_TYPE_P (type)
3259 && tree_int_cst_sgn (@2) > 0
3260 && (tree_nonzero_bits (@0) & tree_nonzero_bits (@1)) == 0)
3261 (with { tree t = type;
3262 if (!TYPE_OVERFLOW_WRAPS (t))
3263 t = unsigned_type_for (t);
3264 wide_int wone = wi::one (TYPE_PRECISION (t));
3265 wide_int c = wi::add (wi::lshift (wone, wi::to_wide (@2)), wone); }
3266 (convert (mult:t (convert:t @1) { wide_int_to_tree (t, c); }))))))
3267
3268 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
3269
3270 (for minmax (min max)
3271 (simplify
3272 (minmax @0 @0)
3273 @0))
3274 /* For fmin() and fmax(), skip folding when both are sNaN. */
3275 (for minmax (FMIN_ALL FMAX_ALL)
3276 (simplify
3277 (minmax @0 @0)
3278 (if (!tree_expr_maybe_signaling_nan_p (@0))
3279 @0)))
3280 /* min(max(x,y),y) -> y. */
3281 (simplify
3282 (min:c (max:c @0 @1) @1)
3283 @1)
3284 /* max(min(x,y),y) -> y. */
3285 (simplify
3286 (max:c (min:c @0 @1) @1)
3287 @1)
3288 /* max(a,-a) -> abs(a). */
3289 (simplify
3290 (max:c @0 (negate @0))
3291 (if (TREE_CODE (type) != COMPLEX_TYPE
3292 && (! ANY_INTEGRAL_TYPE_P (type)
3293 || TYPE_OVERFLOW_UNDEFINED (type)))
3294 (abs @0)))
3295 /* min(a,-a) -> -abs(a). */
3296 (simplify
3297 (min:c @0 (negate @0))
3298 (if (TREE_CODE (type) != COMPLEX_TYPE
3299 && (! ANY_INTEGRAL_TYPE_P (type)
3300 || TYPE_OVERFLOW_UNDEFINED (type)))
3301 (negate (abs @0))))
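
/* Editorial illustration, not part of the pattern source: a minimal C sketch
   of max(a,-a) -> abs(a) and min(a,-a) -> -abs(a).  The overflow-undefined
   requirement matters because for a == INT_MIN both -a and abs(a) already
   overflow, so the fold introduces no new problem; the helper names are made
   up for the example.

     #include <assert.h>
     #include <stdlib.h>

     static int max2 (int a, int b) { return a > b ? a : b; }
     static int min2 (int a, int b) { return a < b ? a : b; }

     int main (void)
     {
       for (int a = -1000; a <= 1000; a++)
         {
           assert (max2 (a, -a) == abs (a));
           assert (min2 (a, -a) == -abs (a));
         }
       return 0;
     }
*/
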
3302 (simplify
3303 (min @0 @1)
3304 (switch
3305 (if (INTEGRAL_TYPE_P (type)
3306 && TYPE_MIN_VALUE (type)
3307 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
3308 @1)
3309 (if (INTEGRAL_TYPE_P (type)
3310 && TYPE_MAX_VALUE (type)
3311 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
3312 @0)))
3313 (simplify
3314 (max @0 @1)
3315 (switch
3316 (if (INTEGRAL_TYPE_P (type)
3317 && TYPE_MAX_VALUE (type)
3318 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
3319 @1)
3320 (if (INTEGRAL_TYPE_P (type)
3321 && TYPE_MIN_VALUE (type)
3322 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
3323 @0)))
3324
3325 /* max (a, a + CST) -> a + CST where CST is positive. */
3326 /* max (a, a + CST) -> a where CST is negative. */
3327 (simplify
3328 (max:c @0 (plus@2 @0 INTEGER_CST@1))
3329 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
3330 (if (tree_int_cst_sgn (@1) > 0)
3331 @2
3332 @0)))
3333
3334 /* min (a, a + CST) -> a where CST is positive. */
3335 /* min (a, a + CST) -> a + CST where CST is negative. */
3336 (simplify
3337 (min:c @0 (plus@2 @0 INTEGER_CST@1))
3338 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
3339 (if (tree_int_cst_sgn (@1) > 0)
3340 @0
3341 @2)))
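
/* Editorial illustration, not part of the pattern source: a C sketch of the
   max/min (a, a + CST) folds above, assuming signed arithmetic where a + CST
   cannot overflow (which is what TYPE_OVERFLOW_UNDEFINED lets the folder
   assume); the helper names are made up for the example.

     #include <assert.h>

     static int max2 (int a, int b) { return a > b ? a : b; }
     static int min2 (int a, int b) { return a < b ? a : b; }

     int main (void)
     {
       for (int a = -100; a <= 100; a++)
         {
           assert (max2 (a, a + 7) == a + 7);   // CST > 0: max is a + CST
           assert (max2 (a, a - 7) == a);       // CST < 0: max is a
           assert (min2 (a, a + 7) == a);       // CST > 0: min is a
           assert (min2 (a, a - 7) == a - 7);   // CST < 0: min is a + CST
         }
       return 0;
     }
*/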
3342
3343 /* Simplify min (&var[off0], &var[off1]) etc. depending on whether
3344 the addresses are known to be less, equal or greater. */
3345 (for minmax (min max)
3346 cmp (lt gt)
3347 (simplify
3348 (minmax (convert1?@2 addr@0) (convert2?@3 addr@1))
3349 (with
3350 {
3351 poly_int64 off0, off1;
3352 tree base0, base1;
3353 int equal = address_compare (cmp, TREE_TYPE (@2), @0, @1, base0, base1,
3354 off0, off1, GENERIC);
3355 }
3356 (if (equal == 1)
3357 (if (minmax == MIN_EXPR)
3358 (if (known_le (off0, off1))
3359 @2
3360 (if (known_gt (off0, off1))
3361 @3))
3362 (if (known_ge (off0, off1))
3363 @2
3364 (if (known_lt (off0, off1))
3365 @3)))))))
3366
3367 /* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
3368 and the outer convert demotes the expression back to x's type. */
3369 (for minmax (min max)
3370 (simplify
3371 (convert (minmax@0 (convert @1) INTEGER_CST@2))
3372 (if (INTEGRAL_TYPE_P (type)
3373 && types_match (@1, type) && int_fits_type_p (@2, type)
3374 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
3375 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
3376 (minmax @1 (convert @2)))))
3377
3378 (for minmax (FMIN_ALL FMAX_ALL)
3379 /* If either argument is NaN and the other one is not an sNaN, return the
3380 other one. Avoid the transformation if we get (and honor) a signalling NaN. */
3381 (simplify
3382 (minmax:c @0 REAL_CST@1)
3383 (if (real_isnan (TREE_REAL_CST_PTR (@1))
3384 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling)
3385 && !tree_expr_maybe_signaling_nan_p (@0))
3386 @0)))
3387 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
3388 functions to return the numeric arg if the other one is NaN.
3389 MIN and MAX don't honor that, so only transform if -ffinite-math-only
3390 is set. C99 doesn't require -0.0 to be handled, so we don't have to
3391 worry about it either. */
3392 (if (flag_finite_math_only)
3393 (simplify
3394 (FMIN_ALL @0 @1)
3395 (min @0 @1))
3396 (simplify
3397 (FMAX_ALL @0 @1)
3398 (max @0 @1)))
3399 /* min (-A, -B) -> -max (A, B) */
3400 (for minmax (min max FMIN_ALL FMAX_ALL)
3401 maxmin (max min FMAX_ALL FMIN_ALL)
3402 (simplify
3403 (minmax (negate:s@2 @0) (negate:s@3 @1))
3404 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3405 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3406 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3407 (negate (maxmin @0 @1)))))
3408 /* MIN (~X, ~Y) -> ~MAX (X, Y)
3409 MAX (~X, ~Y) -> ~MIN (X, Y) */
3410 (for minmax (min max)
3411 maxmin (max min)
3412 (simplify
3413 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
3414 (bit_not (maxmin @0 @1))))
3415
3416 /* MIN (X, Y) == X -> X <= Y */
3417 (for minmax (min min max max)
3418 cmp (eq ne eq ne )
3419 out (le gt ge lt )
3420 (simplify
3421 (cmp:c (minmax:c @0 @1) @0)
3422 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3423 (out @0 @1))))
3424 /* MIN (X, 5) == 0 -> X == 0
3425 MIN (X, 5) == 7 -> false */
3426 (for cmp (eq ne)
3427 (simplify
3428 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
3429 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
3430 TYPE_SIGN (TREE_TYPE (@0))))
3431 { constant_boolean_node (cmp == NE_EXPR, type); }
3432 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
3433 TYPE_SIGN (TREE_TYPE (@0))))
3434 (cmp @0 @2)))))
3435 (for cmp (eq ne)
3436 (simplify
3437 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
3438 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
3439 TYPE_SIGN (TREE_TYPE (@0))))
3440 { constant_boolean_node (cmp == NE_EXPR, type); }
3441 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
3442 TYPE_SIGN (TREE_TYPE (@0))))
3443 (cmp @0 @2)))))
3444 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
3445 (for minmax (min min max max min min max max )
3446 cmp (lt le gt ge gt ge lt le )
3447 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
3448 (simplify
3449 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
3450 (comb (cmp @0 @2) (cmp @1 @2))))
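
/* Editorial illustration, not part of the pattern source: a C sketch of one
   of the eight rows above, MIN (X, C1) < C2 -> X < C2 || C1 < C2 (the MAX
   and AND rows work the same way with the comparison direction flipped).

     #include <assert.h>

     static int min2 (int a, int b) { return a < b ? a : b; }

     int main (void)
     {
       for (int x = -50; x <= 50; x++)
         {
           assert ((min2 (x, 10) < 20) == (x < 20 || 10 < 20));
           assert ((min2 (x, 30) < 20) == (x < 20 || 30 < 20));
         }
       return 0;
     }
*/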
3451
3452 /* X <= MAX(X, Y) -> true
3453 X > MAX(X, Y) -> false
3454 X >= MIN(X, Y) -> true
3455 X < MIN(X, Y) -> false */
3456 (for minmax (min min max max )
3457 cmp (ge lt le gt )
3458 (simplify
3459 (cmp @0 (minmax:c @0 @1))
3460 { constant_boolean_node (cmp == GE_EXPR || cmp == LE_EXPR, type); } ))
3461
3462 /* Undo fancy ways of writing max/min or other ?: expressions, like
3463 a - ((a - b) & -(a < b)) and a - (a - b) * (a < b) into (a < b) ? b : a.
3464 People normally use ?: and that is what we actually try to optimize. */
3465 /* Transform A + (B-A)*cmp into cmp ? B : A. */
3466 (simplify
3467 (plus:c @0 (mult:c (minus @1 @0) zero_one_valued_p@2))
3468 (if (INTEGRAL_TYPE_P (type)
3469 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
3470 (cond (convert:boolean_type_node @2) @1 @0)))
3471 /* Transform A - (A-B)*cmp into cmp ? B : A. */
3472 (simplify
3473 (minus @0 (mult:c (minus @0 @1) zero_one_valued_p@2))
3474 (if (INTEGRAL_TYPE_P (type)
3475 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
3476 (cond (convert:boolean_type_node @2) @1 @0)))
3477 /* Transform A ^ (A^B)*cmp into cmp ? B : A. */
3478 (simplify
3479 (bit_xor:c @0 (mult:c (bit_xor:c @0 @1) zero_one_valued_p@2))
3480 (if (INTEGRAL_TYPE_P (type)
3481 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
3482 (cond (convert:boolean_type_node @2) @1 @0)))
3483
3484 /* (x <= 0 ? -x : 0) -> max(-x, 0). */
3485 (simplify
3486 (cond (le @0 integer_zerop@1) (negate@2 @0) integer_zerop@1)
3487 (max @2 @1))
3488
3489 /* Simplifications of shift and rotates. */
3490
3491 (for rotate (lrotate rrotate)
3492 (simplify
3493 (rotate integer_all_onesp@0 @1)
3494 @0))
3495
3496 /* Optimize -1 >> x for arithmetic right shifts. */
3497 (simplify
3498 (rshift integer_all_onesp@0 @1)
3499 (if (!TYPE_UNSIGNED (type))
3500 @0))
3501
3502 /* Optimize (x >> c) << c into x & (-1<<c). */
3503 (simplify
3504 (lshift (nop_convert? (rshift @0 INTEGER_CST@1)) @1)
3505 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
3506 /* It doesn't matter if the right shift is arithmetic or logical. */
3507 (bit_and (view_convert @0) (lshift { build_minus_one_cst (type); } @1))))
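
/* Editorial illustration, not part of the pattern source: a C sketch of
   (x >> c) << c -> x & (-1 << c), assuming a 32-bit unsigned type so the
   all-ones mask can be written as UINT32_MAX.

     #include <assert.h>
     #include <stdint.h>

     int main (void)
     {
       uint32_t x = 0x1234abcdu;
       for (unsigned c = 0; c < 32; c++)
         assert (((x >> c) << c) == (x & (UINT32_MAX << c)));
       return 0;
     }
*/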
3508
3509 (simplify
3510 (lshift (convert (convert@2 (rshift @0 INTEGER_CST@1))) @1)
3511 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type))
3512 /* Allow intermediate conversion to integral type with whatever sign, as
3513 long as the low TYPE_PRECISION (type)
3514 - TYPE_PRECISION (TREE_TYPE (@2)) bits are preserved. */
3515 && INTEGRAL_TYPE_P (type)
3516 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
3517 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3518 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
3519 && (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (type)
3520 || wi::geu_p (wi::to_wide (@1),
3521 TYPE_PRECISION (type)
3522 - TYPE_PRECISION (TREE_TYPE (@2)))))
3523 (bit_and (convert @0) (lshift { build_minus_one_cst (type); } @1))))
3524
3525 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
3526 types. */
3527 (simplify
3528 (rshift (lshift @0 INTEGER_CST@1) @1)
3529 (if (TYPE_UNSIGNED (type)
3530 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
3531 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
3532
3533 /* Optimize x >> x into 0 */
3534 (simplify
3535 (rshift @0 @0)
3536 { build_zero_cst (type); })
3537
3538 (for shiftrotate (lrotate rrotate lshift rshift)
3539 (simplify
3540 (shiftrotate @0 integer_zerop)
3541 (non_lvalue @0))
3542 (simplify
3543 (shiftrotate integer_zerop@0 @1)
3544 @0)
3545 /* Prefer vector1 << scalar to vector1 << vector2
3546 if vector2 is uniform. */
3547 (for vec (VECTOR_CST CONSTRUCTOR)
3548 (simplify
3549 (shiftrotate @0 vec@1)
3550 (with { tree tem = uniform_vector_p (@1); }
3551 (if (tem)
3552 (shiftrotate @0 { tem; }))))))
3553
3554 /* Simplify X << Y where Y's low width bits are 0 to X, as the only
3555 valid Y is then 0. Similarly for X >> Y. */
3556 #if GIMPLE
3557 (for shift (lshift rshift)
3558 (simplify
3559 (shift @0 SSA_NAME@1)
3560 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3561 (with {
3562 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
3563 int prec = TYPE_PRECISION (TREE_TYPE (@1));
3564 }
3565 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
3566 @0)))))
3567 #endif
3568
3569 /* Rewrite an LROTATE_EXPR by a constant into an
3570 RROTATE_EXPR by a new constant. */
3571 (simplify
3572 (lrotate @0 INTEGER_CST@1)
3573 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
3574 build_int_cst (TREE_TYPE (@1),
3575 element_precision (type)), @1); }))
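
/* Editorial illustration, not part of the pattern source: rotating left by C
   is the same as rotating right by precision - C.  A C sketch, assuming
   32-bit unsigned values and 0 < C < 32 so all shift amounts stay valid; the
   rotate helpers are made up for the example.

     #include <assert.h>
     #include <stdint.h>

     static uint32_t rotl32 (uint32_t x, unsigned c) { return (x << c) | (x >> (32 - c)); }
     static uint32_t rotr32 (uint32_t x, unsigned c) { return (x >> c) | (x << (32 - c)); }

     int main (void)
     {
       uint32_t x = 0xdeadbeefu;
       for (unsigned c = 1; c < 32; c++)
         assert (rotl32 (x, c) == rotr32 (x, 32 - c));
       return 0;
     }
*/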
3576
3577 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
3578 (for op (lrotate rrotate rshift lshift)
3579 (simplify
3580 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
3581 (with { unsigned int prec = element_precision (type); }
3582 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
3583 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
3584 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
3585 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
3586 (with { unsigned int low = (tree_to_uhwi (@1)
3587 + tree_to_uhwi (@2)); }
3588 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
3589 being well defined. */
3590 (if (low >= prec)
3591 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
3592 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
3593 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
3594 { build_zero_cst (type); }
3595 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
3596 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
3597
3598
3599 /* Simplify (CST << x) & 1 to 0 if CST is even or to x == 0 if it is odd. */
3600 (simplify
3601 (bit_and (lshift INTEGER_CST@1 @0) integer_onep)
3602 (if ((wi::to_wide (@1) & 1) != 0)
3603 (convert (eq:boolean_type_node @0 { build_zero_cst (TREE_TYPE (@0)); }))
3604 { build_zero_cst (type); }))
3605
3606 /* Simplify ((C << x) & D) != 0 where C and D are power of two constants,
3607 either to false if D is smaller (unsigned comparison) than C, or to
3608 x == log2 (D) - log2 (C). Similarly for right shifts. */
3609 (for cmp (ne eq)
3610 icmp (eq ne)
3611 (simplify
3612 (cmp (bit_and (lshift integer_pow2p@1 @0) integer_pow2p@2) integer_zerop)
3613 (with { int c1 = wi::clz (wi::to_wide (@1));
3614 int c2 = wi::clz (wi::to_wide (@2)); }
3615 (if (c1 < c2)
3616 { constant_boolean_node (cmp == NE_EXPR ? false : true, type); }
3617 (icmp @0 { build_int_cst (TREE_TYPE (@0), c1 - c2); }))))
3618 (simplify
3619 (cmp (bit_and (rshift integer_pow2p@1 @0) integer_pow2p@2) integer_zerop)
3620 (if (tree_int_cst_sgn (@1) > 0)
3621 (with { int c1 = wi::clz (wi::to_wide (@1));
3622 int c2 = wi::clz (wi::to_wide (@2)); }
3623 (if (c1 > c2)
3624 { constant_boolean_node (cmp == NE_EXPR ? false : true, type); }
3625 (icmp @0 { build_int_cst (TREE_TYPE (@0), c2 - c1); }))))))
3626
3627 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
3628 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
3629 if CST2 != 0. */
3630 (for cmp (ne eq)
3631 (simplify
3632 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
3633 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
3634 (if (cand < 0
3635 || (!integer_zerop (@2)
3636 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
3637 { constant_boolean_node (cmp == NE_EXPR, type); }
3638 (if (!integer_zerop (@2)
3639 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
3640 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
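
/* Editorial illustration, not part of the pattern source: with CST1 = 4 and
   CST2 = 32 we have ctz (32) - ctz (4) = 5 - 2 = 3, and indeed 4 << a == 32
   exactly when a == 3.  A C sketch using the GCC builtin __builtin_ctz.

     #include <assert.h>

     int main (void)
     {
       int diff = __builtin_ctz (32) - __builtin_ctz (4);   // 5 - 2 = 3
       for (int a = 0; a < 8; a++)
         assert (((4 << a) == 32) == (a == diff));
       return 0;
     }
*/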
3641
3642 /* Fold ((X << C1) & C2) cmp C3 into (X & (C2 >> C1)) cmp (C3 >> C1)
3643 ((X >> C1) & C2) cmp C3 into (X & (C2 << C1)) cmp (C3 << C1). */
3644 (for cmp (ne eq)
3645 (simplify
3646 (cmp (bit_and:s (lshift:s @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
3647 (if (tree_fits_shwi_p (@1)
3648 && tree_to_shwi (@1) > 0
3649 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
3650 (if (tree_to_shwi (@1) > wi::ctz (wi::to_wide (@3)))
3651 { constant_boolean_node (cmp == NE_EXPR, type); }
3652 (with { wide_int c1 = wi::to_wide (@1);
3653 wide_int c2 = wi::lrshift (wi::to_wide (@2), c1);
3654 wide_int c3 = wi::lrshift (wi::to_wide (@3), c1); }
3655 (cmp (bit_and @0 { wide_int_to_tree (TREE_TYPE (@0), c2); })
3656 { wide_int_to_tree (TREE_TYPE (@0), c3); })))))
3657 (simplify
3658 (cmp (bit_and:s (rshift:s @0 INTEGER_CST@1) INTEGER_CST@2) INTEGER_CST@3)
3659 (if (tree_fits_shwi_p (@1)
3660 && tree_to_shwi (@1) > 0
3661 && tree_to_shwi (@1) < TYPE_PRECISION (TREE_TYPE (@0)))
3662 (with { tree t0 = TREE_TYPE (@0);
3663 unsigned int prec = TYPE_PRECISION (t0);
3664 wide_int c1 = wi::to_wide (@1);
3665 wide_int c2 = wi::to_wide (@2);
3666 wide_int c3 = wi::to_wide (@3);
3667 wide_int sb = wi::set_bit_in_zero (prec - 1, prec); }
3668 (if ((c2 & c3) != c3)
3669 { constant_boolean_node (cmp == NE_EXPR, type); }
3670 (if (TYPE_UNSIGNED (t0))
3671 (if ((c3 & wi::arshift (sb, c1 - 1)) != 0)
3672 { constant_boolean_node (cmp == NE_EXPR, type); }
3673 (cmp (bit_and @0 { wide_int_to_tree (t0, c2 << c1); })
3674 { wide_int_to_tree (t0, c3 << c1); }))
3675 (with { wide_int smask = wi::arshift (sb, c1); }
3676 (switch
3677 (if ((c2 & smask) == 0)
3678 (cmp (bit_and @0 { wide_int_to_tree (t0, c2 << c1); })
3679 { wide_int_to_tree (t0, c3 << c1); }))
3680 (if ((c3 & smask) == 0)
3681 (cmp (bit_and @0 { wide_int_to_tree (t0, (c2 << c1) | sb); })
3682 { wide_int_to_tree (t0, c3 << c1); }))
3683 (if ((c2 & smask) != (c3 & smask))
3684 { constant_boolean_node (cmp == NE_EXPR, type); })
3685 (cmp (bit_and @0 { wide_int_to_tree (t0, (c2 << c1) | sb); })
3686 { wide_int_to_tree (t0, (c3 << c1) | sb); })))))))))
3687
3688 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
3689 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
3690 if the new mask might be further optimized. */
3691 (for shift (lshift rshift)
3692 (simplify
3693 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
3694 INTEGER_CST@2)
3695 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
3696 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
3697 && tree_fits_uhwi_p (@1)
3698 && tree_to_uhwi (@1) > 0
3699 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
3700 (with
3701 {
3702 unsigned int shiftc = tree_to_uhwi (@1);
3703 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
3704 unsigned HOST_WIDE_INT newmask, zerobits = 0;
3705 tree shift_type = TREE_TYPE (@3);
3706 unsigned int prec;
3707
3708 if (shift == LSHIFT_EXPR)
3709 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
3710 else if (shift == RSHIFT_EXPR
3711 && type_has_mode_precision_p (shift_type))
3712 {
3713 prec = TYPE_PRECISION (TREE_TYPE (@3));
3714 tree arg00 = @0;
3715 /* See if more bits can be proven as zero because of
3716 zero extension. */
3717 if (@3 != @0
3718 && TYPE_UNSIGNED (TREE_TYPE (@0)))
3719 {
3720 tree inner_type = TREE_TYPE (@0);
3721 if (type_has_mode_precision_p (inner_type)
3722 && TYPE_PRECISION (inner_type) < prec)
3723 {
3724 prec = TYPE_PRECISION (inner_type);
3725 /* See if we can shorten the right shift. */
3726 if (shiftc < prec)
3727 shift_type = inner_type;
3728 /* Otherwise X >> C1 is all zeros, so we'll optimize
3729 it into (X, 0) later on by making sure zerobits
3730 is all ones. */
3731 }
3732 }
3733 zerobits = HOST_WIDE_INT_M1U;
3734 if (shiftc < prec)
3735 {
3736 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
3737 zerobits <<= prec - shiftc;
3738 }
3739 /* For an arithmetic shift, if the sign bit could be set, zerobits
3740 can actually contain sign bits, so no transformation is
3741 possible unless MASK masks them all away. In that
3742 case the shift needs to be converted into a logical shift. */
3743 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
3744 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
3745 {
3746 if ((mask & zerobits) == 0)
3747 shift_type = unsigned_type_for (TREE_TYPE (@3));
3748 else
3749 zerobits = 0;
3750 }
3751 }
3752 }
3753 /* ((X << 16) & 0xff00) is (X, 0). */
3754 (if ((mask & zerobits) == mask)
3755 { build_int_cst (type, 0); }
3756 (with { newmask = mask | zerobits; }
3757 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
3758 (with
3759 {
3760 /* Only do the transformation if NEWMASK is some integer
3761 mode's mask. */
3762 for (prec = BITS_PER_UNIT;
3763 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
3764 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
3765 break;
3766 }
3767 (if (prec < HOST_BITS_PER_WIDE_INT
3768 || newmask == HOST_WIDE_INT_M1U)
3769 (with
3770 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
3771 (if (!tree_int_cst_equal (newmaskt, @2))
3772 (if (shift_type != TREE_TYPE (@3))
3773 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
3774 (bit_and @4 { newmaskt; })))))))))))))
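
/* Editorial illustration, not part of the pattern source: the simplest case
   handled above is a mask that only covers bits the shift already cleared,
   e.g. ((x << 16) & 0xff00) is always 0 because the low 16 bits of x << 16
   are zero.  A C sketch, assuming a 32-bit unsigned x.

     #include <assert.h>
     #include <stdint.h>

     int main (void)
     {
       for (uint32_t x = 0; x < 100000; x++)
         assert (((x << 16) & 0xff00u) == 0);
       return 0;
     }
*/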
3775
3776 /* ((1 << n) & M) != 0 -> n == log2 (M) */
3777 (for cmp (ne eq)
3778 icmp (eq ne)
3779 (simplify
3780 (cmp
3781 (bit_and
3782 (nop_convert? (lshift integer_onep @0)) integer_pow2p@1) integer_zerop)
3783 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3784 (icmp @0 { wide_int_to_tree (TREE_TYPE (@0),
3785 wi::exact_log2 (wi::to_wide (@1))); }))))
3786
3787 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
3788 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
3789 (for shift (lshift rshift)
3790 (for bit_op (bit_and bit_xor bit_ior)
3791 (simplify
3792 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
3793 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
3794 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
3795 (if (mask)
3796 (bit_op (shift (convert @0) @1) { mask; })))))))
3797
3798 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
3799 (simplify
3800 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
3801 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
3802 && (element_precision (TREE_TYPE (@0))
3803 <= element_precision (TREE_TYPE (@1))
3804 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
3805 (with
3806 { tree shift_type = TREE_TYPE (@0); }
3807 (convert (rshift (convert:shift_type @1) @2)))))
3808
3809 /* ~(~X >>r Y) -> X >>r Y
3810 ~(~X <<r Y) -> X <<r Y */
3811 (for rotate (lrotate rrotate)
3812 (simplify
3813 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
3814 (if ((element_precision (TREE_TYPE (@0))
3815 <= element_precision (TREE_TYPE (@1))
3816 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
3817 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
3818 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
3819 (with
3820 { tree rotate_type = TREE_TYPE (@0); }
3821 (convert (rotate (convert:rotate_type @1) @2))))))
3822
3823 (for cmp (eq ne)
3824 (for rotate (lrotate rrotate)
3825 invrot (rrotate lrotate)
3826 /* (X >>r Y) cmp (Z >>r Y) may simplify to X cmp Z. */
3827 (simplify
3828 (cmp (rotate @1 @0) (rotate @2 @0))
3829 (cmp @1 @2))
3830 /* (X >>r C1) cmp C2 may simplify to X cmp C3. */
3831 (simplify
3832 (cmp (rotate @0 INTEGER_CST@1) INTEGER_CST@2)
3833 (cmp @0 { const_binop (invrot, TREE_TYPE (@0), @2, @1); }))
3834 /* (X >>r Y) cmp C where C is 0 or ~0, may simplify to X cmp C. */
3835 (simplify
3836 (cmp (rotate @0 @1) INTEGER_CST@2)
3837 (if (integer_zerop (@2) || integer_all_onesp (@2))
3838 (cmp @0 @2)))))
3839
3840 /* Narrow a lshift by constant. */
3841 (simplify
3842 (convert (lshift:s@0 @1 INTEGER_CST@2))
3843 (if (INTEGRAL_TYPE_P (type)
3844 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3845 && !integer_zerop (@2)
3846 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)))
3847 (if (TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
3848 || wi::ltu_p (wi::to_wide (@2), TYPE_PRECISION (type)))
3849 (lshift (convert @1) @2)
3850 (if (wi::ltu_p (wi::to_wide (@2), TYPE_PRECISION (TREE_TYPE (@0))))
3851 { build_zero_cst (type); }))))
3852
3853 /* Simplifications of conversions. */
3854
3855 /* Basic strip-useless-type-conversions / strip_nops. */
3856 (for cvt (convert view_convert float fix_trunc)
3857 (simplify
3858 (cvt @0)
3859 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
3860 || (GENERIC && type == TREE_TYPE (@0)))
3861 @0)))
3862
3863 /* Contract view-conversions. */
3864 (simplify
3865 (view_convert (view_convert @0))
3866 (view_convert @0))
3867
3868 /* For integral conversions with the same precision or pointer
3869 conversions use a NOP_EXPR instead. */
3870 (simplify
3871 (view_convert @0)
3872 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3873 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3874 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
3875 (convert @0)))
3876
3877 /* Strip inner integral conversions that do not change precision or size, or
3878 zero-extend while keeping the same size (for bool-to-char). */
3879 (simplify
3880 (view_convert (convert@0 @1))
3881 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3882 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3883 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
3884 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
3885 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
3886 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
3887 (view_convert @1)))
3888
3889 /* Simplify a view-converted empty or single-element constructor. */
3890 (simplify
3891 (view_convert CONSTRUCTOR@0)
3892 (with
3893 { tree ctor = (TREE_CODE (@0) == SSA_NAME
3894 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0); }
3895 (switch
3896 (if (CONSTRUCTOR_NELTS (ctor) == 0)
3897 { build_zero_cst (type); })
3898 (if (CONSTRUCTOR_NELTS (ctor) == 1
3899 && VECTOR_TYPE_P (TREE_TYPE (ctor))
3900 && operand_equal_p (TYPE_SIZE (type),
3901 TYPE_SIZE (TREE_TYPE
3902 (CONSTRUCTOR_ELT (ctor, 0)->value))))
3903 (view_convert { CONSTRUCTOR_ELT (ctor, 0)->value; })))))
3904
3905 /* Re-association barriers around constants and other re-association
3906 barriers can be removed. */
3907 (simplify
3908 (paren CONSTANT_CLASS_P@0)
3909 @0)
3910 (simplify
3911 (paren (paren@1 @0))
3912 @1)
3913
3914 /* Handle cases of two conversions in a row. */
3915 (for ocvt (convert float fix_trunc)
3916 (for icvt (convert float)
3917 (simplify
3918 (ocvt (icvt@1 @0))
3919 (with
3920 {
3921 tree inside_type = TREE_TYPE (@0);
3922 tree inter_type = TREE_TYPE (@1);
3923 int inside_int = INTEGRAL_TYPE_P (inside_type);
3924 int inside_ptr = POINTER_TYPE_P (inside_type);
3925 int inside_float = FLOAT_TYPE_P (inside_type);
3926 int inside_vec = VECTOR_TYPE_P (inside_type);
3927 unsigned int inside_prec = TYPE_PRECISION (inside_type);
3928 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
3929 int inter_int = INTEGRAL_TYPE_P (inter_type);
3930 int inter_ptr = POINTER_TYPE_P (inter_type);
3931 int inter_float = FLOAT_TYPE_P (inter_type);
3932 int inter_vec = VECTOR_TYPE_P (inter_type);
3933 unsigned int inter_prec = TYPE_PRECISION (inter_type);
3934 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
3935 int final_int = INTEGRAL_TYPE_P (type);
3936 int final_ptr = POINTER_TYPE_P (type);
3937 int final_float = FLOAT_TYPE_P (type);
3938 int final_vec = VECTOR_TYPE_P (type);
3939 unsigned int final_prec = TYPE_PRECISION (type);
3940 int final_unsignedp = TYPE_UNSIGNED (type);
3941 }
3942 (switch
3943 /* In addition to the cases of two conversions in a row
3944 handled below, if we are converting something to its own
3945 type via an object of identical or wider precision, neither
3946 conversion is needed. */
3947 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
3948 || (GENERIC
3949 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
3950 && (((inter_int || inter_ptr) && final_int)
3951 || (inter_float && final_float))
3952 && inter_prec >= final_prec)
3953 (ocvt @0))
3954
3955 /* Likewise, if the intermediate and initial types are either both
3956 float or both integer, we don't need the middle conversion if the
3957 former is wider than the latter and doesn't change the signedness
3958 (for integers). Avoid this if the final type is a pointer since
3959 then we sometimes need the middle conversion. */
3960 (if (((inter_int && inside_int) || (inter_float && inside_float))
3961 && (final_int || final_float)
3962 && inter_prec >= inside_prec
3963 && (inter_float || inter_unsignedp == inside_unsignedp))
3964 (ocvt @0))
3965
3966 /* If we have a sign-extension of a zero-extended value, we can
3967 replace that by a single zero-extension. Likewise if the
3968 final conversion does not change precision we can drop the
3969 intermediate conversion. */
3970 (if (inside_int && inter_int && final_int
3971 && ((inside_prec < inter_prec && inter_prec < final_prec
3972 && inside_unsignedp && !inter_unsignedp)
3973 || final_prec == inter_prec))
3974 (ocvt @0))
3975
3976 /* Two conversions in a row are not needed unless:
3977 - some conversion is floating-point (overstrict for now), or
3978 - some conversion is a vector (overstrict for now), or
3979 - the intermediate type is narrower than both initial and
3980 final, or
3981 - the intermediate type and innermost type differ in signedness,
3982 and the outermost type is wider than the intermediate, or
3983 - the initial type is a pointer type and the precisions of the
3984 intermediate and final types differ, or
3985 - the final type is a pointer type and the precisions of the
3986 initial and intermediate types differ. */
3987 (if (! inside_float && ! inter_float && ! final_float
3988 && ! inside_vec && ! inter_vec && ! final_vec
3989 && (inter_prec >= inside_prec || inter_prec >= final_prec)
3990 && ! (inside_int && inter_int
3991 && inter_unsignedp != inside_unsignedp
3992 && inter_prec < final_prec)
3993 && ((inter_unsignedp && inter_prec > inside_prec)
3994 == (final_unsignedp && final_prec > inter_prec))
3995 && ! (inside_ptr && inter_prec != final_prec)
3996 && ! (final_ptr && inside_prec != inter_prec))
3997 (ocvt @0))
3998
3999 /* A truncation to an unsigned type (a zero-extension) should be
4000 canonicalized as bitwise and of a mask. */
4001 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
4002 && final_int && inter_int && inside_int
4003 && final_prec == inside_prec
4004 && final_prec > inter_prec
4005 && inter_unsignedp)
4006 (convert (bit_and @0 { wide_int_to_tree
4007 (inside_type,
4008 wi::mask (inter_prec, false,
4009 TYPE_PRECISION (inside_type))); })))
4010
4011 /* If we are converting an integer to a floating-point that can
4012 represent it exactly and back to an integer, we can skip the
4013 floating-point conversion. */
4014 (if (GIMPLE /* PR66211 */
4015 && inside_int && inter_float && final_int &&
4016 (unsigned) significand_size (TYPE_MODE (inter_type))
4017 >= inside_prec - !inside_unsignedp)
4018 (convert @0)))))))
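
/* Editorial illustration, not part of the pattern source: the "truncation to
   an unsigned type" case above means e.g. (int)(unsigned short)x is the same
   as x & 0xffff, assuming 32-bit int and 16-bit unsigned short.

     #include <assert.h>

     int main (void)
     {
       for (int x = -100000; x <= 100000; x += 7)
         assert ((int)(unsigned short)x == (x & 0xffff));
       return 0;
     }
*/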
4019
4020 /* (float_type)(integer_type) x -> trunc (x) if the type of x matches
4021 float_type. Only do the transformation if we do not need to preserve
4022 trapping behaviour, so require !flag_trapping_math. */
4023 #if GIMPLE
4024 (simplify
4025 (float (fix_trunc @0))
4026 (if (!flag_trapping_math
4027 && types_match (type, TREE_TYPE (@0))
4028 && direct_internal_fn_supported_p (IFN_TRUNC, type,
4029 OPTIMIZE_FOR_BOTH))
4030 (IFN_TRUNC @0)))
4031 #endif
4032
4033 /* If we have a narrowing conversion to an integral type that is fed by a
4034 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
4035 masks off bits outside the final type (and nothing else). */
4036 (simplify
4037 (convert (bit_and @0 INTEGER_CST@1))
4038 (if (INTEGRAL_TYPE_P (type)
4039 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4040 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
4041 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
4042 TYPE_PRECISION (type)), 0))
4043 (convert @0)))
4044
4045
4046 /* (X /[ex] A) * A -> X. */
4047 (simplify
4048 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
4049 (convert @0))
4050
4051 /* Simplify (A / B) * B + (A % B) -> A. */
4052 (for div (trunc_div ceil_div floor_div round_div)
4053 mod (trunc_mod ceil_mod floor_mod round_mod)
4054 (simplify
4055 (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
4056 @0))
4057
4058 /* x / y * y == x -> x % y == 0. */
4059 (simplify
4060 (eq:c (mult:c (trunc_div:s @0 @1) @1) @0)
4061 (if (TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE)
4062 (eq (trunc_mod @0 @1) { build_zero_cst (TREE_TYPE (@0)); })))
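
/* Editorial illustration, not part of the pattern source: x / y * y discards
   the remainder, so it equals x exactly when x % y == 0.  A quick C sketch
   over positive values.

     #include <assert.h>

     int main (void)
     {
       for (int x = 1; x <= 100; x++)
         for (int y = 1; y <= 10; y++)
           assert ((x / y * y == x) == (x % y == 0));
       return 0;
     }
*/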
4063
4064 /* ((X /[ex] A) +- B) * A --> X +- A * B. */
4065 (for op (plus minus)
4066 (simplify
4067 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
4068 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
4069 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
4070 (with
4071 {
4072 wi::overflow_type overflow;
4073 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
4074 TYPE_SIGN (type), &overflow);
4075 }
4076 (if (types_match (type, TREE_TYPE (@2))
4077 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
4078 (op @0 { wide_int_to_tree (type, mul); })
4079 (with { tree utype = unsigned_type_for (type); }
4080 (convert (op (convert:utype @0)
4081 (mult (convert:utype @1) (convert:utype @2))))))))))
4082
4083 /* Canonicalization of binary operations. */
4084
4085 /* Convert X + -C into X - C. */
4086 (simplify
4087 (plus @0 REAL_CST@1)
4088 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
4089 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
4090 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
4091 (minus @0 { tem; })))))
4092
4093 /* Convert x+x into x*2. */
4094 (simplify
4095 (plus @0 @0)
4096 (if (SCALAR_FLOAT_TYPE_P (type))
4097 (mult @0 { build_real (type, dconst2); })
4098 (if (INTEGRAL_TYPE_P (type))
4099 (mult @0 { build_int_cst (type, 2); }))))
4100
4101 /* 0 - X -> -X. */
4102 (simplify
4103 (minus integer_zerop @1)
4104 (negate @1))
4105 (simplify
4106 (pointer_diff integer_zerop @1)
4107 (negate (convert @1)))
4108
4109 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
4110 ARG0 is zero and X + ARG0 reduces to X, since that would mean
4111 (-ARG1 + ARG0) reduces to -ARG1. */
4112 (simplify
4113 (minus real_zerop@0 @1)
4114 (if (fold_real_zero_addition_p (type, @1, @0, 0))
4115 (negate @1)))
4116
4117 /* Transform x * -1 into -x. */
4118 (simplify
4119 (mult @0 integer_minus_onep)
4120 (negate @0))
4121
4122 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
4123 signed overflow for CST != 0 && CST != -1. */
4124 (simplify
4125 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
4126 (if (TREE_CODE (@2) != INTEGER_CST
4127 && single_use (@3)
4128 && !integer_zerop (@1) && !integer_minus_onep (@1))
4129 (mult (mult @0 @2) @1)))
4130
4131 /* True if we can easily extract the real and imaginary parts of a complex
4132 number. */
4133 (match compositional_complex
4134 (convert? (complex @0 @1)))
4135
4136 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
4137 (simplify
4138 (complex (realpart @0) (imagpart @0))
4139 @0)
4140 (simplify
4141 (realpart (complex @0 @1))
4142 @0)
4143 (simplify
4144 (imagpart (complex @0 @1))
4145 @1)
4146
4147 /* Sometimes we only care about half of a complex expression. */
4148 (simplify
4149 (realpart (convert?:s (conj:s @0)))
4150 (convert (realpart @0)))
4151 (simplify
4152 (imagpart (convert?:s (conj:s @0)))
4153 (convert (negate (imagpart @0))))
4154 (for part (realpart imagpart)
4155 (for op (plus minus)
4156 (simplify
4157 (part (convert?:s@2 (op:s @0 @1)))
4158 (convert (op (part @0) (part @1))))))
4159 (simplify
4160 (realpart (convert?:s (CEXPI:s @0)))
4161 (convert (COS @0)))
4162 (simplify
4163 (imagpart (convert?:s (CEXPI:s @0)))
4164 (convert (SIN @0)))
4165
4166 /* conj(conj(x)) -> x */
4167 (simplify
4168 (conj (convert? (conj @0)))
4169 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
4170 (convert @0)))
4171
4172 /* conj({x,y}) -> {x,-y} */
4173 (simplify
4174 (conj (convert?:s (complex:s @0 @1)))
4175 (with { tree itype = TREE_TYPE (type); }
4176 (complex (convert:itype @0) (negate (convert:itype @1)))))
4177
4178 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
4179 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32
4180 BUILT_IN_BSWAP64 BUILT_IN_BSWAP128)
4181 (simplify
4182 (bswap (bswap @0))
4183 @0)
4184 (simplify
4185 (bswap (bit_not (bswap @0)))
4186 (bit_not @0))
4187 (for bitop (bit_xor bit_ior bit_and)
4188 (simplify
4189 (bswap (bitop:c (bswap @0) @1))
4190 (bitop @0 (bswap @1))))
4191 (for cmp (eq ne)
4192 (simplify
4193 (cmp (bswap@2 @0) (bswap @1))
4194 (with { tree ctype = TREE_TYPE (@2); }
4195 (cmp (convert:ctype @0) (convert:ctype @1))))
4196 (simplify
4197 (cmp (bswap @0) INTEGER_CST@1)
4198 (with { tree ctype = TREE_TYPE (@1); }
4199 (cmp (convert:ctype @0) (bswap! @1)))))
4200 /* (bswap(x) >> C1) & C2 can sometimes be simplified to (x >> C3) & C2. */
4201 (simplify
4202 (bit_and (convert1? (rshift@0 (convert2? (bswap@4 @1)) INTEGER_CST@2))
4203 INTEGER_CST@3)
4204 (if (BITS_PER_UNIT == 8
4205 && tree_fits_uhwi_p (@2)
4206 && tree_fits_uhwi_p (@3))
4207 (with
4208 {
4209 unsigned HOST_WIDE_INT prec = TYPE_PRECISION (TREE_TYPE (@4));
4210 unsigned HOST_WIDE_INT bits = tree_to_uhwi (@2);
4211 unsigned HOST_WIDE_INT mask = tree_to_uhwi (@3);
4212 unsigned HOST_WIDE_INT lo = bits & 7;
4213 unsigned HOST_WIDE_INT hi = bits - lo;
4214 }
4215 (if (bits < prec
4216 && mask < (256u>>lo)
4217 && bits < TYPE_PRECISION (TREE_TYPE(@0)))
4218 (with { unsigned HOST_WIDE_INT ns = (prec - (hi + 8)) + lo; }
4219 (if (ns == 0)
4220 (bit_and (convert @1) @3)
4221 (with
4222 {
4223 tree utype = unsigned_type_for (TREE_TYPE (@1));
4224 tree nst = build_int_cst (integer_type_node, ns);
4225 }
4226 (bit_and (convert (rshift:utype (convert:utype @1) {nst;})) @3))))))))
4227 /* bswap(x) >> C1 can sometimes be simplified to (T)x >> C2. */
4228 (simplify
4229 (rshift (convert? (bswap@2 @0)) INTEGER_CST@1)
4230 (if (BITS_PER_UNIT == 8
4231 && CHAR_TYPE_SIZE == 8
4232 && tree_fits_uhwi_p (@1))
4233 (with
4234 {
4235 unsigned HOST_WIDE_INT prec = TYPE_PRECISION (TREE_TYPE (@2));
4236 unsigned HOST_WIDE_INT bits = tree_to_uhwi (@1);
4237 /* If the bswap was extended before the original shift, this
4238 byte (shift) has the sign of the extension, not the sign of
4239 the original shift. */
4240 tree st = TYPE_PRECISION (type) > prec ? TREE_TYPE (@2) : type;
4241 }
4242 /* Special case: logical right shift of sign-extended bswap.
4243 (unsigned)(short)bswap16(x)>>12 is (unsigned)((short)x<<8)>>12. */
4244 (if (TYPE_PRECISION (type) > prec
4245 && !TYPE_UNSIGNED (TREE_TYPE (@2))
4246 && TYPE_UNSIGNED (type)
4247 && bits < prec && bits + 8 >= prec)
4248 (with { tree nst = build_int_cst (integer_type_node, prec - 8); }
4249 (rshift (convert (lshift:st (convert:st @0) {nst;})) @1))
4250 (if (bits + 8 == prec)
4251 (if (TYPE_UNSIGNED (st))
4252 (convert (convert:unsigned_char_type_node @0))
4253 (convert (convert:signed_char_type_node @0)))
4254 (if (bits < prec && bits + 8 > prec)
4255 (with
4256 {
4257 tree nst = build_int_cst (integer_type_node, bits & 7);
4258 tree bt = TYPE_UNSIGNED (st) ? unsigned_char_type_node
4259 : signed_char_type_node;
4260 }
4261 (convert (rshift:bt (convert:bt @0) {nst;})))))))))
4262 /* bswap(x) & C1 can sometimes be simplified to (x >> C2) & C1. */
4263 (simplify
4264 (bit_and (convert? (bswap@2 @0)) INTEGER_CST@1)
4265 (if (BITS_PER_UNIT == 8
4266 && tree_fits_uhwi_p (@1)
4267 && tree_to_uhwi (@1) < 256)
4268 (with
4269 {
4270 unsigned HOST_WIDE_INT prec = TYPE_PRECISION (TREE_TYPE (@2));
4271 tree utype = unsigned_type_for (TREE_TYPE (@0));
4272 tree nst = build_int_cst (integer_type_node, prec - 8);
4273 }
4274 (bit_and (convert (rshift:utype (convert:utype @0) {nst;})) @1)))))
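
/* Editorial illustration, not part of the pattern source: the byte-swap
   patterns above exploit that byte K of bswap(x) is byte (N-1-K) of x, so
   for example (__builtin_bswap32 (x) >> 24) & 0xff is just x & 0xff.  A
   small C sketch using the GCC builtin.

     #include <assert.h>
     #include <stdint.h>

     int main (void)
     {
       uint32_t x = 0x12345678u;
       assert (((__builtin_bswap32 (x) >> 24) & 0xffu) == (x & 0xffu));   // 0x78
       assert ((__builtin_bswap32 (x) & 0xffu) == ((x >> 24) & 0xffu));   // 0x12
       return 0;
     }
*/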
4275
4276
4277 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
4278
4279 /* Simplify constant conditions.
4280 Only optimize constant conditions when the selected branch
4281 has the same type as the COND_EXPR. This avoids optimizing
4282 away "c ? x : throw", where the throw has a void type.
4283 Note that we cannot throw away the fold-const.cc variant nor
4284 this one as we depend on doing this transform before possibly
4285 A ? B : B -> B triggers and the fold-const.cc one can optimize
4286 0 ? A : B to B even if A has side-effects. Something
4287 genmatch cannot handle. */
4288 (simplify
4289 (cond INTEGER_CST@0 @1 @2)
4290 (if (integer_zerop (@0))
4291 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
4292 @2)
4293 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
4294 @1)))
4295 (simplify
4296 (vec_cond VECTOR_CST@0 @1 @2)
4297 (if (integer_all_onesp (@0))
4298 @1
4299 (if (integer_zerop (@0))
4300 @2)))
4301
4302 /* Sink unary operations to branches, but only if both arms fold. */
4303 (for op (negate bit_not abs absu)
4304 (simplify
4305 (op (vec_cond:s @0 @1 @2))
4306 (vec_cond @0 (op! @1) (op! @2))))
4307
4308 /* Sink binary operation to branches, but only if we can fold it. */
4309 (for op (tcc_comparison plus minus mult bit_and bit_ior bit_xor
4310 lshift rshift rdiv trunc_div ceil_div floor_div round_div
4311 trunc_mod ceil_mod floor_mod round_mod min max)
4312 /* (c ? a : b) op (c ? d : e) --> c ? (a op d) : (b op e) */
4313 (simplify
4314 (op (vec_cond:s @0 @1 @2) (vec_cond:s @0 @3 @4))
4315 (vec_cond @0 (op! @1 @3) (op! @2 @4)))
4316
4317 /* (c ? a : b) op d --> c ? (a op d) : (b op d) */
4318 (simplify
4319 (op (vec_cond:s @0 @1 @2) @3)
4320 (vec_cond @0 (op! @1 @3) (op! @2 @3)))
4321 (simplify
4322 (op @3 (vec_cond:s @0 @1 @2))
4323 (vec_cond @0 (op! @3 @1) (op! @3 @2))))
4324
4325 #if GIMPLE
4326 (match (nop_atomic_bit_test_and_p @0 @1 @4)
4327 (bit_and (convert?@4 (ATOMIC_FETCH_OR_XOR_N @2 INTEGER_CST@0 @3))
4328 INTEGER_CST@1)
4329 (with {
4330 int ibit = tree_log2 (@0);
4331 int ibit2 = tree_log2 (@1);
4332 }
4333 (if (ibit == ibit2
4334 && ibit >= 0
4335 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
4336
4337 (match (nop_atomic_bit_test_and_p @0 @1 @3)
4338 (bit_and (convert?@3 (SYNC_FETCH_OR_XOR_N @2 INTEGER_CST@0))
4339 INTEGER_CST@1)
4340 (with {
4341 int ibit = tree_log2 (@0);
4342 int ibit2 = tree_log2 (@1);
4343 }
4344 (if (ibit == ibit2
4345 && ibit >= 0
4346 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
4347
4348 (match (nop_atomic_bit_test_and_p @0 @0 @4)
4349 (bit_and:c
4350 (convert1?@4
4351 (ATOMIC_FETCH_OR_XOR_N @2 (nop_convert? (lshift@0 integer_onep@5 @6)) @3))
4352 (convert2? @0))
4353 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)))))
4354
4355 (match (nop_atomic_bit_test_and_p @0 @0 @4)
4356 (bit_and:c
4357 (convert1?@4
4358 (SYNC_FETCH_OR_XOR_N @2 (nop_convert? (lshift@0 integer_onep@3 @5))))
4359 (convert2? @0))
4360 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)))))
4361
4362 (match (nop_atomic_bit_test_and_p @0 @1 @3)
4363 (bit_and@4 (convert?@3 (ATOMIC_FETCH_AND_N @2 INTEGER_CST@0 @5))
4364 INTEGER_CST@1)
4365 (with {
4366 int ibit = wi::exact_log2 (wi::zext (wi::bit_not (wi::to_wide (@0)),
4367 TYPE_PRECISION(type)));
4368 int ibit2 = tree_log2 (@1);
4369 }
4370 (if (ibit == ibit2
4371 && ibit >= 0
4372 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
4373
4374 (match (nop_atomic_bit_test_and_p @0 @1 @3)
4375 (bit_and@4
4376 (convert?@3 (SYNC_FETCH_AND_AND_N @2 INTEGER_CST@0))
4377 INTEGER_CST@1)
4378 (with {
4379 int ibit = wi::exact_log2 (wi::zext (wi::bit_not (wi::to_wide (@0)),
4380 TYPE_PRECISION(type)));
4381 int ibit2 = tree_log2 (@1);
4382 }
4383 (if (ibit == ibit2
4384 && ibit >= 0
4385 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))))))
4386
4387 (match (nop_atomic_bit_test_and_p @4 @0 @3)
4388 (bit_and:c
4389 (convert1?@3
4390 (ATOMIC_FETCH_AND_N @2 (nop_convert?@4 (bit_not (lshift@0 integer_onep@6 @7))) @5))
4391 (convert2? @0))
4392 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@4)))))
4393
4394 (match (nop_atomic_bit_test_and_p @4 @0 @3)
4395 (bit_and:c
4396 (convert1?@3
4397 (SYNC_FETCH_AND_AND_N @2 (nop_convert?@4 (bit_not (lshift@0 integer_onep@6 @7)))))
4398 (convert2? @0))
4399 (if (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@4)))))
4400
4401 #endif
4402
4403 /* (v ? w : 0) ? a : b is just (v & w) ? a : b
4404 Currently disabled after pass lvec because ARM understands
4405 VEC_COND_EXPR<v==w,-1,0> but not a plain v==w fed to BIT_IOR_EXPR. */
4406 (simplify
4407 (vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
4408 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
4409 (vec_cond (bit_and @0 @3) @1 @2)))
4410 (simplify
4411 (vec_cond (vec_cond:s @0 integer_all_onesp @3) @1 @2)
4412 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
4413 (vec_cond (bit_ior @0 @3) @1 @2)))
4414 (simplify
4415 (vec_cond (vec_cond:s @0 integer_zerop @3) @1 @2)
4416 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
4417 (vec_cond (bit_ior @0 (bit_not @3)) @2 @1)))
4418 (simplify
4419 (vec_cond (vec_cond:s @0 @3 integer_all_onesp) @1 @2)
4420 (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
4421 (vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
4422
4423 /* c1 ? c2 ? a : b : b --> (c1 & c2) ? a : b */
4424 (simplify
4425 (vec_cond @0 (vec_cond:s @1 @2 @3) @3)
4426 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
4427 (vec_cond (bit_and @0 @1) @2 @3)))
4428 (simplify
4429 (vec_cond @0 @2 (vec_cond:s @1 @2 @3))
4430 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
4431 (vec_cond (bit_ior @0 @1) @2 @3)))
4432 (simplify
4433 (vec_cond @0 (vec_cond:s @1 @2 @3) @2)
4434 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
4435 (vec_cond (bit_ior (bit_not @0) @1) @2 @3)))
4436 (simplify
4437 (vec_cond @0 @3 (vec_cond:s @1 @2 @3))
4438 (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
4439 (vec_cond (bit_and (bit_not @0) @1) @2 @3)))
4440
4441 /* Canonicalize mask ? { 0, ... } : { -1, ...} to ~mask if the mask
4442 types are compatible. */
4443 (simplify
4444 (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2)
4445 (if (VECTOR_BOOLEAN_TYPE_P (type)
4446 && types_match (type, TREE_TYPE (@0)))
4447 (if (integer_zerop (@1) && integer_all_onesp (@2))
4448 (bit_not @0)
4449 (if (integer_all_onesp (@1) && integer_zerop (@2))
4450 @0))))
4451
4452 /* A few simplifications of "a ? CST1 : CST2". */
4453 /* NOTE: Only do this on gimple as the if-chain-to-switch
4454 optimization depends on the gimple having if statements in it. */
4455 #if GIMPLE
4456 (simplify
4457 (cond @0 INTEGER_CST@1 INTEGER_CST@2)
4458 (switch
4459 (if (integer_zerop (@2))
4460 (switch
4461 /* a ? 1 : 0 -> a when 0 and 1 have integral type. */
4462 (if (integer_onep (@1))
4463 (convert (convert:boolean_type_node @0)))
4464 /* a ? powerof2cst : 0 -> a << (log2(powerof2cst)) */
4465 (if (INTEGRAL_TYPE_P (type) && integer_pow2p (@1))
4466 (with {
4467 tree shift = build_int_cst (integer_type_node, tree_log2 (@1));
4468 }
4469 (lshift (convert (convert:boolean_type_node @0)) { shift; })))
4470 /* a ? -1 : 0 -> -a. There is no need to check that TYPE_PRECISION is not 1
4471 here, as the powerof2cst case above handles that case correctly. */
4472 (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@1))
4473 (negate (convert (convert:boolean_type_node @0))))))
4474 (if (integer_zerop (@1))
4475 (with {
4476 tree booltrue = constant_boolean_node (true, boolean_type_node);
4477 }
4478 (switch
4479 /* a ? 0 : 1 -> !a. */
4480 (if (integer_onep (@2))
4481 (convert (bit_xor (convert:boolean_type_node @0) { booltrue; } )))
4482 /* a ? 0 : powerof2cst -> (!a) << (log2(powerof2cst)) */
4483 (if (INTEGRAL_TYPE_P (type) && integer_pow2p (@2))
4484 (with {
4485 tree shift = build_int_cst (integer_type_node, tree_log2 (@2));
4486 }
4487 (lshift (convert (bit_xor (convert:boolean_type_node @0) { booltrue; } ))
4488 { shift; })))
4489 /* a ? 0 : -1 -> -(!a). There is no need to check that TYPE_PRECISION is not 1
4490 here, as the powerof2cst case above handles that case correctly. */
4491 (if (INTEGRAL_TYPE_P (type) && integer_all_onesp (@2))
4492 (negate (convert (bit_xor (convert:boolean_type_node @0) { booltrue; } ))))
4493 )
4494 )
4495 )
4496 )
4497 )
4498 #endif
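
/* Editorial illustration, not part of the pattern source: "a ? 4 : 0" can be
   rewritten as a shift of the boolean value, ((int) (_Bool) a) << 2, and
   "a ? -1 : 0" as its negation.  A C sketch assuming 32-bit int.

     #include <assert.h>

     int main (void)
     {
       for (int a = -3; a <= 3; a++)
         {
           assert ((a ? 4 : 0) == ((int) (_Bool) a) << 2);
           assert ((a ? -1 : 0) == -(int) (_Bool) a);
         }
       return 0;
     }
*/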
4499
4500 (simplify
4501 (convert (cond@0 @1 INTEGER_CST@2 INTEGER_CST@3))
4502 (if (INTEGRAL_TYPE_P (type)
4503 && INTEGRAL_TYPE_P (TREE_TYPE (@0)))
4504 (cond @1 (convert @2) (convert @3))))
4505
4506 /* Simplification moved from fold_cond_expr_with_comparison. It may also
4507 be extended. */
4508 /* This pattern implements two kinds of simplification:
4509
4510 Case 1)
4511 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
4512 1) Conversions are type widening from smaller type.
4513 2) Const c1 equals to c2 after canonicalizing comparison.
4514 3) Comparison has tree code LT, LE, GT or GE.
4515 This specific pattern is needed when (cmp (convert x) c) may not
4516 be simplified by comparison patterns because of multiple uses of
4517 x. It also makes sense here because simplifying across multiple
4518 referenced variables is always beneficial for complicated cases.
4519
4520 Case 2)
4521 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
4522 (for cmp (lt le gt ge eq)
4523 (simplify
4524 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
4525 (with
4526 {
4527 tree from_type = TREE_TYPE (@1);
4528 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
4529 enum tree_code code = ERROR_MARK;
4530
4531 if (INTEGRAL_TYPE_P (from_type)
4532 && int_fits_type_p (@2, from_type)
4533 && (types_match (c1_type, from_type)
4534 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
4535 && (TYPE_UNSIGNED (from_type)
4536 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
4537 && (types_match (c2_type, from_type)
4538 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
4539 && (TYPE_UNSIGNED (from_type)
4540 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
4541 {
4542 if (cmp != EQ_EXPR)
4543 {
4544 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
4545 {
4546 /* X <= Y - 1 is equivalent to X < Y. */
4547 if (cmp == LE_EXPR)
4548 code = LT_EXPR;
4549 /* X > Y - 1 is equivalent to X >= Y. */
4550 if (cmp == GT_EXPR)
4551 code = GE_EXPR;
4552 }
4553 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
4554 {
4555 /* X < Y + 1 is equivalent to X <= Y. */
4556 if (cmp == LT_EXPR)
4557 code = LE_EXPR;
4558 /* X >= Y + 1 is equivalent to X > Y. */
4559 if (cmp == GE_EXPR)
4560 code = GT_EXPR;
4561 }
4562 if (code != ERROR_MARK
4563 || wi::to_widest (@2) == wi::to_widest (@3))
4564 {
4565 if (cmp == LT_EXPR || cmp == LE_EXPR)
4566 code = MIN_EXPR;
4567 if (cmp == GT_EXPR || cmp == GE_EXPR)
4568 code = MAX_EXPR;
4569 }
4570 }
4571 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
4572 else if (int_fits_type_p (@3, from_type))
4573 code = EQ_EXPR;
4574 }
4575 }
4576 (if (code == MAX_EXPR)
4577 (convert (max @1 (convert @2)))
4578 (if (code == MIN_EXPR)
4579 (convert (min @1 (convert @2)))
4580 (if (code == EQ_EXPR)
4581 (convert (cond (eq @1 (convert @3))
4582 (convert:from_type @3) (convert:from_type @2)))))))))
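
/* Editorial illustration, not part of the pattern source: Case 1 above turns
   e.g. "x < 5 ? x : 5" into MIN_EXPR (x, 5) and "x > 5 ? x : 5" into
   MAX_EXPR (x, 5); Case 2 covers "x == 5 ? x : c2", which may use the
   constant 5 for the true arm.  A small C sketch; the min2/max2 helpers are
   made up for the example.

     #include <assert.h>

     static int min2 (int a, int b) { return a < b ? a : b; }
     static int max2 (int a, int b) { return a > b ? a : b; }

     int main (void)
     {
       for (int x = -20; x <= 20; x++)
         {
           assert ((x < 5 ? x : 5) == min2 (x, 5));
           assert ((x > 5 ? x : 5) == max2 (x, 5));
           assert ((x == 5 ? x : 7) == (x == 5 ? 5 : 7));
         }
       return 0;
     }
*/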
4583
4584 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
4585
4586 1) OP is PLUS or MINUS.
4587 2) CMP is LT, LE, GT or GE.
4588 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
4589
4590 This pattern also handles special cases like:
4591
4592 A) Operand x is an unsigned to signed type conversion and c1 is
4593 integer zero. In this case,
4594 (signed type)x < 0 <=> x > MAX_VAL(signed type)
4595 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
4596 B) Const c1 may not be equal to (C3 op' C2). In this case we also
4597 check equality for (c1+1) and (c1-1) by adjusting comparison
4598 code.
4599
4600 TODO: Though signed types are handled by this pattern, they cannot be
4601 simplified at the moment because the C standard requires additional
4602 type promotion. In order to match and simplify them here, the IR needs
4603 to be cleaned up by other optimizers, e.g. VRP. */
4604 (for op (plus minus)
4605 (for cmp (lt le gt ge)
4606 (simplify
4607 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
4608 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
4609 (if (types_match (from_type, to_type)
4610 /* Check if it is special case A). */
4611 || (TYPE_UNSIGNED (from_type)
4612 && !TYPE_UNSIGNED (to_type)
4613 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
4614 && integer_zerop (@1)
4615 && (cmp == LT_EXPR || cmp == GE_EXPR)))
4616 (with
4617 {
4618 wi::overflow_type overflow = wi::OVF_NONE;
4619 enum tree_code code, cmp_code = cmp;
4620 wide_int real_c1;
4621 wide_int c1 = wi::to_wide (@1);
4622 wide_int c2 = wi::to_wide (@2);
4623 wide_int c3 = wi::to_wide (@3);
4624 signop sgn = TYPE_SIGN (from_type);
4625
4626 /* Handle special case A), given x of unsigned type:
4627 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
4628 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
4629 if (!types_match (from_type, to_type))
4630 {
4631 if (cmp_code == LT_EXPR)
4632 cmp_code = GT_EXPR;
4633 if (cmp_code == GE_EXPR)
4634 cmp_code = LE_EXPR;
4635 c1 = wi::max_value (to_type);
4636 }
4637 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
4638 compute (c3 op' c2) and check whether it equals c1, with op' being
4639 the inverted operator of op. Make sure overflow does not occur
4640 when it would be undefined. */
4641 if (op == PLUS_EXPR)
4642 real_c1 = wi::sub (c3, c2, sgn, &overflow);
4643 else
4644 real_c1 = wi::add (c3, c2, sgn, &overflow);
4645
4646 code = cmp_code;
4647 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
4648 {
4649 /* Check whether c1 equals real_c1. Boundary conditions are handled
4650 by adjusting the comparison operation if necessary. */
4651 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
4652 && !overflow)
4653 {
4654 /* X <= Y - 1 is equivalent to X < Y.  */
4655 if (cmp_code == LE_EXPR)
4656 code = LT_EXPR;
4657 /* X > Y - 1 is equivalent to X >= Y.  */
4658 if (cmp_code == GT_EXPR)
4659 code = GE_EXPR;
4660 }
4661 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
4662 && !overflow)
4663 {
4664 /* X < Y + 1 is equivalent to X <= Y.  */
4665 if (cmp_code == LT_EXPR)
4666 code = LE_EXPR;
4667 /* X >= Y + 1 is equivalent to X > Y.  */
4668 if (cmp_code == GE_EXPR)
4669 code = GT_EXPR;
4670 }
4671 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
4672 {
4673 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
4674 code = MIN_EXPR;
4675 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
4676 code = MAX_EXPR;
4677 }
4678 }
4679 }
4680 (if (code == MAX_EXPR)
4681 (op (max @X { wide_int_to_tree (from_type, real_c1); })
4682 { wide_int_to_tree (from_type, c2); })
4683 (if (code == MIN_EXPR)
4684 (op (min @X { wide_int_to_tree (from_type, real_c1); })
4685 { wide_int_to_tree (from_type, c2); })))))))))
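
/* Illustrative C sketch of the MIN/MAX rewrite above, assuming 32-bit
   int; the function names are placeholders, not part of the pattern:

     int before (int x) { return x < 10 ? x + 3 : 13; }
     int after (int x) { return (x < 10 ? x : 10) + 3; }

   Here c1 = 10, op = plus, c2 = 3 and c3 = 13 = c1 + c2, so the
   conditional becomes MIN (x, 10) + 3: for x < 10 both forms yield
   x + 3, otherwise both yield 13.  */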
4686
4687 #if GIMPLE
4688 /* A >= B ? A : B -> max (A, B) and friends. The code is still
4689 in fold_cond_expr_with_comparison for GENERIC folding with
4690 some extra constraints. */
4691 (for cmp (eq ne le lt unle unlt ge gt unge ungt uneq ltgt)
4692 (simplify
4693 (cond (cmp:c (nop_convert1?@c0 @0) (nop_convert2?@c1 @1))
4694 (convert3? @0) (convert4? @1))
4695 (if (!HONOR_SIGNED_ZEROS (type)
4696 && (/* Allow widening conversions of the compare operands as data. */
4697 (INTEGRAL_TYPE_P (type)
4698 && types_match (TREE_TYPE (@c0), TREE_TYPE (@0))
4699 && types_match (TREE_TYPE (@c1), TREE_TYPE (@1))
4700 && TYPE_PRECISION (TREE_TYPE (@0)) <= TYPE_PRECISION (type)
4701 && TYPE_PRECISION (TREE_TYPE (@1)) <= TYPE_PRECISION (type))
4702 /* Or sign conversions for the comparison. */
4703 || (types_match (type, TREE_TYPE (@0))
4704 && types_match (type, TREE_TYPE (@1)))))
4705 (switch
4706 (if (cmp == EQ_EXPR)
4707 (if (VECTOR_TYPE_P (type))
4708 (view_convert @c1)
4709 (convert @c1)))
4710 (if (cmp == NE_EXPR)
4711 (if (VECTOR_TYPE_P (type))
4712 (view_convert @c0)
4713 (convert @c0)))
4714 (if (cmp == LE_EXPR || cmp == UNLE_EXPR || cmp == LT_EXPR || cmp == UNLT_EXPR)
4715 (if (!HONOR_NANS (type))
4716 (if (VECTOR_TYPE_P (type))
4717 (view_convert (min @c0 @c1))
4718 (convert (min @c0 @c1)))))
4719 (if (cmp == GE_EXPR || cmp == UNGE_EXPR || cmp == GT_EXPR || cmp == UNGT_EXPR)
4720 (if (!HONOR_NANS (type))
4721 (if (VECTOR_TYPE_P (type))
4722 (view_convert (max @c0 @c1))
4723 (convert (max @c0 @c1)))))
4724 (if (cmp == UNEQ_EXPR)
4725 (if (!HONOR_NANS (type))
4726 (if (VECTOR_TYPE_P (type))
4727 (view_convert @c1)
4728 (convert @c1))))
4729 (if (cmp == LTGT_EXPR)
4730 (if (!HONOR_NANS (type))
4731 (if (VECTOR_TYPE_P (type))
4732 (view_convert @c0)
4733 (convert @c0))))))))
4734 #endif
4735
4736 /* X != C1 ? -X : C2 simplifies to -X when -C1 == C2. */
4737 (simplify
4738 (cond (ne @0 INTEGER_CST@1) (negate@3 @0) INTEGER_CST@2)
4739 (if (!TYPE_SATURATING (type)
4740 && (TYPE_OVERFLOW_WRAPS (type)
4741 || !wi::only_sign_bit_p (wi::to_wide (@1)))
4742 && wi::eq_p (wi::neg (wi::to_wide (@1)), wi::to_wide (@2)))
4743 @3))
4744
4745 /* X != C1 ? ~X : C2 simplifies to ~X when ~C1 == C2. */
4746 (simplify
4747 (cond (ne @0 INTEGER_CST@1) (bit_not@3 @0) INTEGER_CST@2)
4748 (if (wi::eq_p (wi::bit_not (wi::to_wide (@1)), wi::to_wide (@2)))
4749 @3))
4750
4751 /* (X + 1) > Y ? -X : 1 simplifies to X >= Y ? -X : 1 when
4752 X is unsigned: when X + 1 overflows, X is -1, so -X == 1.  */
4753 (simplify
4754 (cond (gt (plus @0 integer_onep) @1) (negate @0) integer_onep@2)
4755 (if (TYPE_UNSIGNED (type))
4756 (cond (ge @0 @1) (negate @0) @2)))
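
/* Illustrative C sketch of the unsigned wrap-around case above
   (32-bit unsigned arithmetic assumed; names are placeholders):

     unsigned before (unsigned x, unsigned y) { return x + 1 > y ? -x : 1; }
     unsigned after (unsigned x, unsigned y) { return x >= y ? -x : 1; }

   The two conditions only differ for x == UINT_MAX, where x + 1 wraps
   to 0; there -x is 1, so both arms agree and the forms are equal.  */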
4757
4758 (for cnd (cond vec_cond)
4759 /* A ? B : (A ? X : C) -> A ? B : C. */
4760 (simplify
4761 (cnd @0 (cnd @0 @1 @2) @3)
4762 (cnd @0 @1 @3))
4763 (simplify
4764 (cnd @0 @1 (cnd @0 @2 @3))
4765 (cnd @0 @1 @3))
4766 /* A ? B : (!A ? C : X) -> A ? B : C. */
4767 /* ??? This matches embedded conditions open-coded because genmatch
4768 would generate matching code for conditions in separate stmts only.
4769 The following is still important to merge then and else arm cases
4770 from if-conversion. */
4771 (simplify
4772 (cnd @0 @1 (cnd @2 @3 @4))
4773 (if (inverse_conditions_p (@0, @2))
4774 (cnd @0 @1 @3)))
4775 (simplify
4776 (cnd @0 (cnd @1 @2 @3) @4)
4777 (if (inverse_conditions_p (@0, @1))
4778 (cnd @0 @3 @4)))
4779
4780 /* A ? B : B -> B. */
4781 (simplify
4782 (cnd @0 @1 @1)
4783 @1)
4784
4785 /* !A ? B : C -> A ? C : B. */
4786 (simplify
4787 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
4788 (cnd @0 @2 @1)))
4789
4790 /* abs/negative simplifications moved from fold_cond_expr_with_comparison.
4791 Need to handle the (A - B) case as fold_cond_expr_with_comparison does.
4792 Need to handle UN* comparisons.
4793
4794 None of these transformations work for modes with signed
4795 zeros. If A is +/-0, the first two transformations will
4796 change the sign of the result (from +0 to -0, or vice
4797 versa). The last four will fix the sign of the result,
4798 even though the original expressions could be positive or
4799 negative, depending on the sign of A.
4800
4801 Note that all these transformations are correct if A is
4802 NaN, since the two alternatives (A and -A) are also NaNs. */
4803
4804 (for cnd (cond vec_cond)
4805 /* A == 0 ? A : -A same as -A */
4806 (for cmp (eq uneq)
4807 (simplify
4808 (cnd (cmp @0 zerop) @0 (negate@1 @0))
4809 (if (!HONOR_SIGNED_ZEROS (type))
4810 @1))
4811 (simplify
4812 (cnd (cmp @0 zerop) zerop (negate@1 @0))
4813 (if (!HONOR_SIGNED_ZEROS (type))
4814 @1))
4815 )
4816 /* A != 0 ? A : -A same as A */
4817 (for cmp (ne ltgt)
4818 (simplify
4819 (cnd (cmp @0 zerop) @0 (negate @0))
4820 (if (!HONOR_SIGNED_ZEROS (type))
4821 @0))
4822 (simplify
4823 (cnd (cmp @0 zerop) @0 integer_zerop)
4824 (if (!HONOR_SIGNED_ZEROS (type))
4825 @0))
4826 )
4827 /* A >=/> 0 ? A : -A same as abs (A) */
4828 (for cmp (ge gt)
4829 (simplify
4830 (cnd (cmp @0 zerop) @0 (negate @0))
4831 (if (!HONOR_SIGNED_ZEROS (type)
4832 && !TYPE_UNSIGNED (type))
4833 (abs @0))))
4834 /* A <=/< 0 ? A : -A same as -abs (A) */
4835 (for cmp (le lt)
4836 (simplify
4837 (cnd (cmp @0 zerop) @0 (negate @0))
4838 (if (!HONOR_SIGNED_ZEROS (type)
4839 && !TYPE_UNSIGNED (type))
4840 (if (ANY_INTEGRAL_TYPE_P (type)
4841 && !TYPE_OVERFLOW_WRAPS (type))
4842 (with {
4843 tree utype = unsigned_type_for (type);
4844 }
4845 (convert (negate (absu:utype @0))))
4846 (negate (abs @0)))))
4847 )
4848 )
4849
4850 /* -(type)!A -> (type)A - 1. */
4851 (simplify
4852 (negate (convert?:s (logical_inverted_value:s @0)))
4853 (if (INTEGRAL_TYPE_P (type)
4854 && TREE_CODE (type) != BOOLEAN_TYPE
4855 && TYPE_PRECISION (type) > 1
4856 && TREE_CODE (@0) == SSA_NAME
4857 && ssa_name_has_boolean_range (@0))
4858 (plus (convert:type @0) { build_all_ones_cst (type); })))
4859
4860 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
4861 return all -1 or all 0 results. */
4862 /* ??? We could instead convert all instances of the vec_cond to negate,
4863 but that isn't necessarily a win on its own. */
4864 (simplify
4865 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
4866 (if (VECTOR_TYPE_P (type)
4867 && known_eq (TYPE_VECTOR_SUBPARTS (type),
4868 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
4869 && (TYPE_MODE (TREE_TYPE (type))
4870 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
4871 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
4872
4873 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
4874 (simplify
4875 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
4876 (if (VECTOR_TYPE_P (type)
4877 && known_eq (TYPE_VECTOR_SUBPARTS (type),
4878 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
4879 && (TYPE_MODE (TREE_TYPE (type))
4880 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
4881 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
4882
4883
4884 /* Simplifications of comparisons. */
4885
4886 /* See if we can reduce the magnitude of a constant involved in a
4887 comparison by changing the comparison code. This is a canonicalization
4888 formerly done by maybe_canonicalize_comparison_1. */
4889 (for cmp (le gt)
4890 acmp (lt ge)
4891 (simplify
4892 (cmp @0 uniform_integer_cst_p@1)
4893 (with { tree cst = uniform_integer_cst_p (@1); }
4894 (if (tree_int_cst_sgn (cst) == -1)
4895 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
4896 wide_int_to_tree (TREE_TYPE (cst),
4897 wi::to_wide (cst)
4898 + 1)); })))))
4899 (for cmp (ge lt)
4900 acmp (gt le)
4901 (simplify
4902 (cmp @0 uniform_integer_cst_p@1)
4903 (with { tree cst = uniform_integer_cst_p (@1); }
4904 (if (tree_int_cst_sgn (cst) == 1)
4905 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
4906 wide_int_to_tree (TREE_TYPE (cst),
4907 wi::to_wide (cst) - 1)); })))))
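
/* Worked examples of the canonicalization above (32-bit int):

     x <= -5  ->  x <  -4          x >  -5  ->  x >= -4
     x >=  7  ->  x >   6          x <   7  ->  x <=  6

   The constant always moves one step towards zero, reducing its
   magnitude while the comparison keeps the same meaning.  */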
4908
4909 /* We can simplify a logical negation of a comparison to the
4910 inverted comparison.  As we cannot compute an expression's
4911 operator using invert_tree_comparison, we have to simulate
4912 that with expression code iteration.  */
4913 (for cmp (tcc_comparison)
4914 icmp (inverted_tcc_comparison)
4915 ncmp (inverted_tcc_comparison_with_nans)
4916 /* Ideally we'd like to combine the following two patterns
4917 and handle some more cases by using
4918 (logical_inverted_value (cmp @0 @1))
4919 here but for that genmatch would need to "inline" that.
4920 For now implement what forward_propagate_comparison did. */
4921 (simplify
4922 (bit_not (cmp @0 @1))
4923 (if (VECTOR_TYPE_P (type)
4924 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
4925 /* Comparison inversion may be impossible for trapping math,
4926 invert_tree_comparison will tell us. But we can't use
4927 a computed operator in the replacement tree thus we have
4928 to play the trick below. */
4929 (with { enum tree_code ic = invert_tree_comparison
4930 (cmp, HONOR_NANS (@0)); }
4931 (if (ic == icmp)
4932 (icmp @0 @1)
4933 (if (ic == ncmp)
4934 (ncmp @0 @1))))))
4935 (simplify
4936 (bit_xor (cmp @0 @1) integer_truep)
4937 (with { enum tree_code ic = invert_tree_comparison
4938 (cmp, HONOR_NANS (@0)); }
4939 (if (ic == icmp)
4940 (icmp @0 @1)
4941 (if (ic == ncmp)
4942 (ncmp @0 @1)))))
4943 /* The following bits are handled by fold_binary_op_with_conditional_arg. */
4944 (simplify
4945 (ne (cmp@2 @0 @1) integer_zerop)
4946 (if (types_match (type, TREE_TYPE (@2)))
4947 (cmp @0 @1)))
4948 (simplify
4949 (eq (cmp@2 @0 @1) integer_truep)
4950 (if (types_match (type, TREE_TYPE (@2)))
4951 (cmp @0 @1)))
4952 (simplify
4953 (ne (cmp@2 @0 @1) integer_truep)
4954 (if (types_match (type, TREE_TYPE (@2)))
4955 (with { enum tree_code ic = invert_tree_comparison
4956 (cmp, HONOR_NANS (@0)); }
4957 (if (ic == icmp)
4958 (icmp @0 @1)
4959 (if (ic == ncmp)
4960 (ncmp @0 @1))))))
4961 (simplify
4962 (eq (cmp@2 @0 @1) integer_zerop)
4963 (if (types_match (type, TREE_TYPE (@2)))
4964 (with { enum tree_code ic = invert_tree_comparison
4965 (cmp, HONOR_NANS (@0)); }
4966 (if (ic == icmp)
4967 (icmp @0 @1)
4968 (if (ic == ncmp)
4969 (ncmp @0 @1)))))))
4970
4971 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
4972 ??? The transformation is valid for the other operators if overflow
4973 is undefined for the type, but performing it here badly interacts
4974 with the transformation in fold_cond_expr_with_comparison which
4975 attempts to synthesize ABS_EXPR.  */
4976 (for cmp (eq ne)
4977 (for sub (minus pointer_diff)
4978 (simplify
4979 (cmp (sub@2 @0 @1) integer_zerop)
4980 (if (single_use (@2))
4981 (cmp @0 @1)))))
4982
4983 /* Simplify (x < 0) ^ (y < 0) to (x ^ y) < 0 and
4984 (x >= 0) ^ (y >= 0) to (x ^ y) < 0. */
4985 (for cmp (lt ge)
4986 (simplify
4987 (bit_xor (cmp:s @0 integer_zerop) (cmp:s @1 integer_zerop))
4988 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4989 && !TYPE_UNSIGNED (TREE_TYPE (@0))
4990 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4991 (lt (bit_xor @0 @1) { build_zero_cst (TREE_TYPE (@0)); }))))
4992 /* Simplify (x < 0) ^ (y >= 0) to (x ^ y) >= 0 and
4993 (x >= 0) ^ (y < 0) to (x ^ y) >= 0. */
4994 (simplify
4995 (bit_xor:c (lt:s @0 integer_zerop) (ge:s @1 integer_zerop))
4996 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4997 && !TYPE_UNSIGNED (TREE_TYPE (@0))
4998 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4999 (ge (bit_xor @0 @1) { build_zero_cst (TREE_TYPE (@0)); })))
5000
5001 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
5002 signed arithmetic case. That form is created by the compiler
5003 often enough for folding it to be of value. One example is in
5004 computing loop trip counts after Operator Strength Reduction. */
5005 (for cmp (simple_comparison)
5006 scmp (swapped_simple_comparison)
5007 (simplify
5008 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
5009 /* Handle unfolded multiplication by zero. */
5010 (if (integer_zerop (@1))
5011 (cmp @1 @2)
5012 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
5013 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5014 && single_use (@3))
5015 /* If @1 is negative we swap the sense of the comparison. */
5016 (if (tree_int_cst_sgn (@1) < 0)
5017 (scmp @0 @2)
5018 (cmp @0 @2))))))
5019
5020 /* For integral types with undefined overflow fold
5021 x * C1 == C2 into x == C2 / C1 or false.
5022 If overflow wraps and C1 is odd, simplify to x == C2 / C1 in the ring
5023 Z / 2^n Z. */
5024 (for cmp (eq ne)
5025 (simplify
5026 (cmp (mult @0 INTEGER_CST@1) INTEGER_CST@2)
5027 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5028 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5029 && wi::to_wide (@1) != 0)
5030 (with { widest_int quot; }
5031 (if (wi::multiple_of_p (wi::to_widest (@2), wi::to_widest (@1),
5032 TYPE_SIGN (TREE_TYPE (@0)), &quot))
5033 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), quot); })
5034 { constant_boolean_node (cmp == NE_EXPR, type); }))
5035 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5036 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
5037 && (wi::bit_and (wi::to_wide (@1), 1) == 1))
5038 (cmp @0
5039 {
5040 tree itype = TREE_TYPE (@0);
5041 int p = TYPE_PRECISION (itype);
5042 wide_int m = wi::one (p + 1) << p;
5043 wide_int a = wide_int::from (wi::to_wide (@1), p + 1, UNSIGNED);
5044 wide_int i = wide_int::from (wi::mod_inv (a, m),
5045 p, TYPE_SIGN (itype));
5046 wide_int_to_tree (itype, wi::mul (i, wi::to_wide (@2)));
5047 })))))
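
/* Illustrative C sketch of the wrapping (TYPE_OVERFLOW_WRAPS) case
   above, assuming 32-bit unsigned int; 3 is odd, so it is invertible
   modulo 2^32 and its inverse is 0xAAAAAAAB (3 * 0xAAAAAAAB wraps to 1):

     int before (unsigned x) { return x * 3u == 6u; }
     int after (unsigned x) { return x == 6u * 0xAAAAAAABu; }

   6u * 0xAAAAAAABu wraps to 2u, so both functions accept exactly
   x == 2.  */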
5048
5049 /* Simplify comparison of something with itself. For IEEE
5050 floating-point, we can only do some of these simplifications. */
5051 (for cmp (eq ge le)
5052 (simplify
5053 (cmp @0 @0)
5054 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
5055 || ! tree_expr_maybe_nan_p (@0))
5056 { constant_boolean_node (true, type); }
5057 (if (cmp != EQ_EXPR
5058 /* With -ftrapping-math conversion to EQ loses an exception. */
5059 && (! FLOAT_TYPE_P (TREE_TYPE (@0))
5060 || ! flag_trapping_math))
5061 (eq @0 @0)))))
5062 (for cmp (ne gt lt)
5063 (simplify
5064 (cmp @0 @0)
5065 (if (cmp != NE_EXPR
5066 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
5067 || ! tree_expr_maybe_nan_p (@0))
5068 { constant_boolean_node (false, type); })))
5069 (for cmp (unle unge uneq)
5070 (simplify
5071 (cmp @0 @0)
5072 { constant_boolean_node (true, type); }))
5073 (for cmp (unlt ungt)
5074 (simplify
5075 (cmp @0 @0)
5076 (unordered @0 @0)))
5077 (simplify
5078 (ltgt @0 @0)
5079 (if (!flag_trapping_math || !tree_expr_maybe_nan_p (@0))
5080 { constant_boolean_node (false, type); }))
5081
5082 /* x == ~x -> false */
5083 /* x != ~x -> true */
5084 (for cmp (eq ne)
5085 (simplify
5086 (cmp:c @0 (bit_not @0))
5087 { constant_boolean_node (cmp == NE_EXPR, type); }))
5088
5089 /* Fold ~X op ~Y as Y op X. */
5090 (for cmp (simple_comparison)
5091 (simplify
5092 (cmp (bit_not@2 @0) (bit_not@3 @1))
5093 (if (single_use (@2) && single_use (@3))
5094 (cmp @1 @0))))
5095
5096 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
5097 (for cmp (simple_comparison)
5098 scmp (swapped_simple_comparison)
5099 (simplify
5100 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
5101 (if (single_use (@2)
5102 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
5103 (scmp @0 (bit_not @1)))))
5104
5105 (for cmp (simple_comparison)
5106 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
5107 (simplify
5108 (cmp (convert@2 @0) (convert? @1))
5109 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
5110 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
5111 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
5112 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
5113 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
5114 (with
5115 {
5116 tree type1 = TREE_TYPE (@1);
5117 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
5118 {
5119 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
5120 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
5121 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
5122 type1 = float_type_node;
5123 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
5124 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
5125 type1 = double_type_node;
5126 }
5127 tree newtype
5128 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
5129 ? TREE_TYPE (@0) : type1);
5130 }
5131 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
5132 (cmp (convert:newtype @0) (convert:newtype @1))))))
5133
5134 (simplify
5135 (cmp @0 REAL_CST@1)
5136 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
5137 (switch
5138 /* a CMP (-0) -> a CMP 0 */
5139 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
5140 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
5141 /* (-0) CMP b -> 0 CMP b. */
5142 (if (TREE_CODE (@0) == REAL_CST
5143 && REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@0)))
5144 (cmp { build_real (TREE_TYPE (@0), dconst0); } @1))
5145 /* x != NaN is always true, other ops are always false. */
5146 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
5147 && !tree_expr_signaling_nan_p (@1)
5148 && !tree_expr_maybe_signaling_nan_p (@0))
5149 { constant_boolean_node (cmp == NE_EXPR, type); })
5150 /* NaN != y is always true, other ops are always false. */
5151 (if (TREE_CODE (@0) == REAL_CST
5152 && REAL_VALUE_ISNAN (TREE_REAL_CST (@0))
5153 && !tree_expr_signaling_nan_p (@0)
5154 && !tree_expr_signaling_nan_p (@1))
5155 { constant_boolean_node (cmp == NE_EXPR, type); })
5156 /* Fold comparisons against infinity. */
5157 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
5158 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
5159 (with
5160 {
5161 REAL_VALUE_TYPE max;
5162 enum tree_code code = cmp;
5163 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
5164 if (neg)
5165 code = swap_tree_comparison (code);
5166 }
5167 (switch
5168 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
5169 (if (code == GT_EXPR
5170 && !(HONOR_NANS (@0) && flag_trapping_math))
5171 { constant_boolean_node (false, type); })
5172 (if (code == LE_EXPR)
5173 /* x <= +Inf is always true, if we don't care about NaNs. */
5174 (if (! HONOR_NANS (@0))
5175 { constant_boolean_node (true, type); }
5176 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
5177 an "invalid" exception. */
5178 (if (!flag_trapping_math)
5179 (eq @0 @0))))
5180 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
5181 for == this introduces an exception for x a NaN. */
5182 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
5183 || code == GE_EXPR)
5184 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
5185 (if (neg)
5186 (lt @0 { build_real (TREE_TYPE (@0), max); })
5187 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
5188 /* x < +Inf is always equal to x <= DBL_MAX. */
5189 (if (code == LT_EXPR)
5190 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
5191 (if (neg)
5192 (ge @0 { build_real (TREE_TYPE (@0), max); })
5193 (le @0 { build_real (TREE_TYPE (@0), max); }))))
5194 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
5195 an exception for x a NaN so use an unordered comparison. */
5196 (if (code == NE_EXPR)
5197 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
5198 (if (! HONOR_NANS (@0))
5199 (if (neg)
5200 (ge @0 { build_real (TREE_TYPE (@0), max); })
5201 (le @0 { build_real (TREE_TYPE (@0), max); }))
5202 (if (neg)
5203 (unge @0 { build_real (TREE_TYPE (@0), max); })
5204 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
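
/* Illustrative C sketch of the x < +Inf case above, assuming IEEE
   double and DBL_MAX from <float.h>:

     #include <float.h>
     int before (double x) { return x < __builtin_inf (); }
     int after (double x) { return x <= DBL_MAX; }

   Every finite x satisfies both, +Inf satisfies neither, and a NaN
   makes both comparisons false, so the forms agree for all inputs.  */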
5205
5206 /* If this is a comparison of a real constant with a PLUS_EXPR
5207 or a MINUS_EXPR of a real constant, we can convert it into a
5208 comparison with a revised real constant as long as no overflow
5209 occurs when unsafe_math_optimizations are enabled. */
5210 (if (flag_unsafe_math_optimizations)
5211 (for op (plus minus)
5212 (simplify
5213 (cmp (op @0 REAL_CST@1) REAL_CST@2)
5214 (with
5215 {
5216 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
5217 TREE_TYPE (@1), @2, @1);
5218 }
5219 (if (tem && !TREE_OVERFLOW (tem))
5220 (cmp @0 { tem; }))))))
5221
5222 /* Likewise, we can simplify a comparison of a real constant with
5223 a MINUS_EXPR whose first operand is also a real constant, i.e.
5224 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
5225 floating-point types only if -fassociative-math is set. */
5226 (if (flag_associative_math)
5227 (simplify
5228 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
5229 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
5230 (if (tem && !TREE_OVERFLOW (tem))
5231 (cmp { tem; } @1)))))
5232
5233 /* Fold comparisons against built-in math functions. */
5234 (if (flag_unsafe_math_optimizations && ! flag_errno_math)
5235 (for sq (SQRT)
5236 (simplify
5237 (cmp (sq @0) REAL_CST@1)
5238 (switch
5239 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
5240 (switch
5241 /* sqrt(x) < y is always false, if y is negative. */
5242 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
5243 { constant_boolean_node (false, type); })
5244 /* sqrt(x) > y is always true, if y is negative and we
5245 don't care about NaNs, i.e. negative values of x. */
5246 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
5247 { constant_boolean_node (true, type); })
5248 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
5249 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
5250 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
5251 (switch
5252 /* sqrt(x) < 0 is always false. */
5253 (if (cmp == LT_EXPR)
5254 { constant_boolean_node (false, type); })
5255 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
5256 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
5257 { constant_boolean_node (true, type); })
5258 /* sqrt(x) <= 0 -> x == 0. */
5259 (if (cmp == LE_EXPR)
5260 (eq @0 @1))
5261 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
5262 == or !=. In the last case:
5263
5264 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
5265
5266 if x is negative or NaN. Due to -funsafe-math-optimizations,
5267 the results for other x follow from natural arithmetic. */
5268 (cmp @0 @1)))
5269 (if ((cmp == LT_EXPR
5270 || cmp == LE_EXPR
5271 || cmp == GT_EXPR
5272 || cmp == GE_EXPR)
5273 && !REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
5274 /* Give up for -frounding-math. */
5275 && !HONOR_SIGN_DEPENDENT_ROUNDING (TREE_TYPE (@0)))
5276 (with
5277 {
5278 REAL_VALUE_TYPE c2;
5279 enum tree_code ncmp = cmp;
5280 const real_format *fmt
5281 = REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)));
5282 real_arithmetic (&c2, MULT_EXPR,
5283 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
5284 real_convert (&c2, fmt, &c2);
5285 /* See PR91734: if c2 is inexact and sqrt(c2) < c (or sqrt(c2) >= c),
5286 then change LT_EXPR into LE_EXPR or GE_EXPR into GT_EXPR. */
5287 if (!REAL_VALUE_ISINF (c2))
5288 {
5289 tree c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
5290 build_real (TREE_TYPE (@0), c2));
5291 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
5292 ncmp = ERROR_MARK;
5293 else if ((cmp == LT_EXPR || cmp == GE_EXPR)
5294 && real_less (&TREE_REAL_CST (c3), &TREE_REAL_CST (@1)))
5295 ncmp = cmp == LT_EXPR ? LE_EXPR : GT_EXPR;
5296 else if ((cmp == LE_EXPR || cmp == GT_EXPR)
5297 && real_less (&TREE_REAL_CST (@1), &TREE_REAL_CST (c3)))
5298 ncmp = cmp == LE_EXPR ? LT_EXPR : GE_EXPR;
5299 else
5300 {
5301 /* With rounding to even, sqrt of up to 3 different values
5302 gives the same normal result, so in some cases c2 needs
5303 to be adjusted. */
5304 REAL_VALUE_TYPE c2alt, tow;
5305 if (cmp == LT_EXPR || cmp == GE_EXPR)
5306 tow = dconst0;
5307 else
5308 tow = dconstinf;
5309 real_nextafter (&c2alt, fmt, &c2, &tow);
5310 real_convert (&c2alt, fmt, &c2alt);
5311 if (REAL_VALUE_ISINF (c2alt))
5312 ncmp = ERROR_MARK;
5313 else
5314 {
5315 c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
5316 build_real (TREE_TYPE (@0), c2alt));
5317 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
5318 ncmp = ERROR_MARK;
5319 else if (real_equal (&TREE_REAL_CST (c3),
5320 &TREE_REAL_CST (@1)))
5321 c2 = c2alt;
5322 }
5323 }
5324 }
5325 }
5326 (if (cmp == GT_EXPR || cmp == GE_EXPR)
5327 (if (REAL_VALUE_ISINF (c2))
5328 /* sqrt(x) > y is x == +Inf, when y is very large. */
5329 (if (HONOR_INFINITIES (@0))
5330 (eq @0 { build_real (TREE_TYPE (@0), c2); })
5331 { constant_boolean_node (false, type); })
5332 /* sqrt(x) > c is the same as x > c*c. */
5333 (if (ncmp != ERROR_MARK)
5334 (if (ncmp == GE_EXPR)
5335 (ge @0 { build_real (TREE_TYPE (@0), c2); })
5336 (gt @0 { build_real (TREE_TYPE (@0), c2); }))))
5337 /* else if (cmp == LT_EXPR || cmp == LE_EXPR) */
5338 (if (REAL_VALUE_ISINF (c2))
5339 (switch
5340 /* sqrt(x) < y is always true, when y is a very large
5341 value and we don't care about NaNs or Infinities. */
5342 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
5343 { constant_boolean_node (true, type); })
5344 /* sqrt(x) < y is x != +Inf when y is very large and we
5345 don't care about NaNs. */
5346 (if (! HONOR_NANS (@0))
5347 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
5348 /* sqrt(x) < y is x >= 0 when y is very large and we
5349 don't care about Infinities. */
5350 (if (! HONOR_INFINITIES (@0))
5351 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
5352 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
5353 (if (GENERIC)
5354 (truth_andif
5355 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
5356 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
5357 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
5358 (if (ncmp != ERROR_MARK && ! HONOR_NANS (@0))
5359 (if (ncmp == LT_EXPR)
5360 (lt @0 { build_real (TREE_TYPE (@0), c2); })
5361 (le @0 { build_real (TREE_TYPE (@0), c2); }))
5362 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
5363 (if (ncmp != ERROR_MARK && GENERIC)
5364 (if (ncmp == LT_EXPR)
5365 (truth_andif
5366 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
5367 (lt @0 { build_real (TREE_TYPE (@0), c2); }))
5368 (truth_andif
5369 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
5370 (le @0 { build_real (TREE_TYPE (@0), c2); })))))))))))
5371 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
5372 (simplify
5373 (cmp (sq @0) (sq @1))
5374 (if (! HONOR_NANS (@0))
5375 (cmp @0 @1))))))
5376
5377 /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
5378 (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
5379 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
5380 (simplify
5381 (cmp (float@0 @1) (float @2))
5382 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
5383 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
5384 (with
5385 {
5386 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
5387 tree type1 = TREE_TYPE (@1);
5388 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
5389 tree type2 = TREE_TYPE (@2);
5390 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
5391 }
5392 (if (fmt.can_represent_integral_type_p (type1)
5393 && fmt.can_represent_integral_type_p (type2))
5394 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
5395 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
5396 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
5397 && type1_signed_p >= type2_signed_p)
5398 (icmp @1 (convert @2))
5399 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
5400 && type1_signed_p <= type2_signed_p)
5401 (icmp (convert:type2 @1) @2)
5402 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
5403 && type1_signed_p == type2_signed_p)
5404 (icmp @1 @2))))))))))
5405
5406 /* Optimize various special cases of (FTYPE) N CMP CST. */
5407 (for cmp (lt le eq ne ge gt)
5408 icmp (le le eq ne ge ge)
5409 (simplify
5410 (cmp (float @0) REAL_CST@1)
5411 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
5412 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
5413 (with
5414 {
5415 tree itype = TREE_TYPE (@0);
5416 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
5417 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
5418 /* Be careful to preserve any potential exceptions due to
5419 NaNs. qNaNs are ok in == or != context.
5420 TODO: relax under -fno-trapping-math or
5421 -fno-signaling-nans. */
5422 bool exception_p
5423 = real_isnan (cst) && (cst->signalling
5424 || (cmp != EQ_EXPR && cmp != NE_EXPR));
5425 }
5426 /* TODO: allow non-fitting itype and SNaNs when
5427 -fno-trapping-math. */
5428 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
5429 (with
5430 {
5431 signop isign = TYPE_SIGN (itype);
5432 REAL_VALUE_TYPE imin, imax;
5433 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
5434 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
5435
5436 REAL_VALUE_TYPE icst;
5437 if (cmp == GT_EXPR || cmp == GE_EXPR)
5438 real_ceil (&icst, fmt, cst);
5439 else if (cmp == LT_EXPR || cmp == LE_EXPR)
5440 real_floor (&icst, fmt, cst);
5441 else
5442 real_trunc (&icst, fmt, cst);
5443
5444 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
5445
5446 bool overflow_p = false;
5447 wide_int icst_val
5448 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
5449 }
5450 (switch
5451 /* Optimize cases when CST is outside of ITYPE's range. */
5452 (if (real_compare (LT_EXPR, cst, &imin))
5453 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
5454 type); })
5455 (if (real_compare (GT_EXPR, cst, &imax))
5456 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
5457 type); })
5458 /* Remove cast if CST is an integer representable by ITYPE. */
5459 (if (cst_int_p)
5460 (cmp @0 { gcc_assert (!overflow_p);
5461 wide_int_to_tree (itype, icst_val); })
5462 )
5463 /* When CST is fractional, optimize
5464 (FTYPE) N == CST -> 0
5465 (FTYPE) N != CST -> 1. */
5466 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
5467 { constant_boolean_node (cmp == NE_EXPR, type); })
5468 /* Otherwise replace with sensible integer constant. */
5469 (with
5470 {
5471 gcc_checking_assert (!overflow_p);
5472 }
5473 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
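
/* Worked examples of the (FTYPE) N CMP CST cases above, assuming
   32-bit int and IEEE double:

     (double) n <  2.5   ->  n <= 2
     (double) n >= 2.5   ->  n >= 3
     (double) n == 2.5   ->  0        (CST is fractional)
     (double) n <  5e9   ->  1        (CST is above INT_MAX)  */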
5474
5475 /* Fold A /[ex] B CMP C to A CMP B * C. */
5476 (for cmp (eq ne)
5477 (simplify
5478 (cmp (exact_div @0 @1) INTEGER_CST@2)
5479 (if (!integer_zerop (@1))
5480 (if (wi::to_wide (@2) == 0)
5481 (cmp @0 @2)
5482 (if (TREE_CODE (@1) == INTEGER_CST)
5483 (with
5484 {
5485 wi::overflow_type ovf;
5486 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
5487 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
5488 }
5489 (if (ovf)
5490 { constant_boolean_node (cmp == NE_EXPR, type); }
5491 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
5492 (for cmp (lt le gt ge)
5493 (simplify
5494 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
5495 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
5496 (with
5497 {
5498 wi::overflow_type ovf;
5499 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
5500 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
5501 }
5502 (if (ovf)
5503 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
5504 TYPE_SIGN (TREE_TYPE (@2)))
5505 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
5506 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
5507
5508 /* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.
5509
5510 For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
5511 For large C (more than min/B+2^size), this is also true, with the
5512 multiplication computed modulo 2^size.
5513 For intermediate C, this just tests the sign of A. */
5514 (for cmp (lt le gt ge)
5515 cmp2 (ge ge lt lt)
5516 (simplify
5517 (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
5518 (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
5519 && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
5520 && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
5521 (with
5522 {
5523 tree utype = TREE_TYPE (@2);
5524 wide_int denom = wi::to_wide (@1);
5525 wide_int right = wi::to_wide (@2);
5526 wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
5527 wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
5528 bool small = wi::leu_p (right, smax);
5529 bool large = wi::geu_p (right, smin);
5530 }
5531 (if (small || large)
5532 (cmp (convert:utype @0) (mult @2 (convert @1)))
5533 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))
5534
5535 /* Unordered tests if either argument is a NaN. */
5536 (simplify
5537 (bit_ior (unordered @0 @0) (unordered @1 @1))
5538 (if (types_match (@0, @1))
5539 (unordered @0 @1)))
5540 (simplify
5541 (bit_and (ordered @0 @0) (ordered @1 @1))
5542 (if (types_match (@0, @1))
5543 (ordered @0 @1)))
5544 (simplify
5545 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
5546 @2)
5547 (simplify
5548 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
5549 @2)
5550
5551 /* Simple range test simplifications. */
5552 /* A < B || A >= B -> true. */
5553 (for test1 (lt le le le ne ge)
5554 test2 (ge gt ge ne eq ne)
5555 (simplify
5556 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
5557 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5558 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
5559 { constant_boolean_node (true, type); })))
5560 /* A < B && A >= B -> false. */
5561 (for test1 (lt lt lt le ne eq)
5562 test2 (ge gt eq gt eq gt)
5563 (simplify
5564 (bit_and:c (test1 @0 @1) (test2 @0 @1))
5565 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5566 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
5567 { constant_boolean_node (false, type); })))
5568
5569 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
5570 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
5571
5572 Note that comparisons
5573 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
5574 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
5575 will be canonicalized to the forms above, so there's no need to
5576 consider them here.
5577 */
5578
5579 (for cmp (le gt)
5580 eqcmp (eq ne)
5581 (simplify
5582 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
5583 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
5584 (with
5585 {
5586 tree ty = TREE_TYPE (@0);
5587 unsigned prec = TYPE_PRECISION (ty);
5588 wide_int mask = wi::to_wide (@2, prec);
5589 wide_int rhs = wi::to_wide (@3, prec);
5590 signop sgn = TYPE_SIGN (ty);
5591 }
5592 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
5593 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
5594 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
5595 { build_zero_cst (ty); }))))))
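
/* Illustrative C sketch of the mask comparison above, with N = 4 and
   K = 2 (names are placeholders):

     int before (unsigned a) { return (a & 15u) <= 3u; }
     int after (unsigned a) { return (a & 12u) == 0u; }

   (a & 15) <= 3 holds exactly when bits 2 and 3 of a are clear, which
   is what (a & (15 - 3)) == 0 tests.  */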
5596
5597 /* -A CMP -B -> B CMP A. */
5598 (for cmp (tcc_comparison)
5599 scmp (swapped_tcc_comparison)
5600 (simplify
5601 (cmp (negate @0) (negate @1))
5602 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
5603 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
5604 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
5605 (scmp @0 @1)))
5606 (simplify
5607 (cmp (negate @0) CONSTANT_CLASS_P@1)
5608 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
5609 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
5610 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
5611 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
5612 (if (tem && !TREE_OVERFLOW (tem))
5613 (scmp @0 { tem; }))))))
5614
5615 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
5616 (for op (eq ne)
5617 (simplify
5618 (op (abs @0) zerop@1)
5619 (op @0 @1)))
5620
5621 /* From fold_sign_changed_comparison and fold_widened_comparison.
5622 FIXME: the lack of symmetry is disturbing. */
5623 (for cmp (simple_comparison)
5624 (simplify
5625 (cmp (convert@0 @00) (convert?@1 @10))
5626 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5627 /* Disable this optimization if we're casting a function pointer
5628 type on targets that require function pointer canonicalization. */
5629 && !(targetm.have_canonicalize_funcptr_for_compare ()
5630 && ((POINTER_TYPE_P (TREE_TYPE (@00))
5631 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
5632 || (POINTER_TYPE_P (TREE_TYPE (@10))
5633 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
5634 && single_use (@0))
5635 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
5636 && (TREE_CODE (@10) == INTEGER_CST
5637 || @1 != @10)
5638 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
5639 || cmp == NE_EXPR
5640 || cmp == EQ_EXPR)
5641 && !POINTER_TYPE_P (TREE_TYPE (@00))
5642 /* (int)bool:32 != (int)uint is not the same as
5643 bool:32 != (bool:32)uint since boolean types only have two valid
5644 values independent of their precision. */
5645 && (TREE_CODE (TREE_TYPE (@00)) != BOOLEAN_TYPE
5646 || TREE_CODE (TREE_TYPE (@10)) == BOOLEAN_TYPE))
5647 /* ??? The special-casing of INTEGER_CST conversion was in the original
5648 code and here to avoid a spurious overflow flag on the resulting
5649 constant which fold_convert produces. */
5650 (if (TREE_CODE (@1) == INTEGER_CST)
5651 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
5652 TREE_OVERFLOW (@1)); })
5653 (cmp @00 (convert @1)))
5654
5655 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
5656 /* If possible, express the comparison in the shorter mode. */
5657 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
5658 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
5659 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
5660 && TYPE_UNSIGNED (TREE_TYPE (@00))))
5661 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
5662 || ((TYPE_PRECISION (TREE_TYPE (@00))
5663 >= TYPE_PRECISION (TREE_TYPE (@10)))
5664 && (TYPE_UNSIGNED (TREE_TYPE (@00))
5665 == TYPE_UNSIGNED (TREE_TYPE (@10))))
5666 || (TREE_CODE (@10) == INTEGER_CST
5667 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
5668 && int_fits_type_p (@10, TREE_TYPE (@00)))))
5669 (cmp @00 (convert @10))
5670 (if (TREE_CODE (@10) == INTEGER_CST
5671 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
5672 && !int_fits_type_p (@10, TREE_TYPE (@00)))
5673 (with
5674 {
5675 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
5676 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
5677 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
5678 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
5679 }
5680 (if (above || below)
5681 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
5682 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
5683 (if (cmp == LT_EXPR || cmp == LE_EXPR)
5684 { constant_boolean_node (above ? true : false, type); }
5685 (if (cmp == GT_EXPR || cmp == GE_EXPR)
5686 { constant_boolean_node (above ? false : true, type); }))))))))))))
5687
5688 (for cmp (eq ne)
5689 (simplify
5690 /* SSA names are canonicalized to 2nd place. */
5691 (cmp addr@0 SSA_NAME@1)
5692 (with
5693 { poly_int64 off; tree base; }
5694 /* A local variable can never be pointed to by
5695 the default SSA name of an incoming parameter. */
5696 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
5697 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL
5698 && (base = get_base_address (TREE_OPERAND (@0, 0)))
5699 && TREE_CODE (base) == VAR_DECL
5700 && auto_var_in_fn_p (base, current_function_decl))
5701 (if (cmp == NE_EXPR)
5702 { constant_boolean_node (true, type); }
5703 { constant_boolean_node (false, type); })
5704 /* If the address is based on @1 decide using the offset. */
5705 (if ((base = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off))
5706 && TREE_CODE (base) == MEM_REF
5707 && TREE_OPERAND (base, 0) == @1)
5708 (with { off += mem_ref_offset (base).force_shwi (); }
5709 (if (known_ne (off, 0))
5710 { constant_boolean_node (cmp == NE_EXPR, type); }
5711 (if (known_eq (off, 0))
5712 { constant_boolean_node (cmp == EQ_EXPR, type); }))))))))
5713
5714 /* Equality compare simplifications from fold_binary */
5715 (for cmp (eq ne)
5716
5717 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
5718 Similarly for NE_EXPR. */
5719 (simplify
5720 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
5721 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
5722 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
5723 { constant_boolean_node (cmp == NE_EXPR, type); }))
5724
5725 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
5726 (simplify
5727 (cmp (bit_xor @0 @1) integer_zerop)
5728 (cmp @0 @1))
5729
5730 /* (X ^ Y) == Y becomes X == 0.
5731 Likewise (X ^ Y) == X becomes Y == 0. */
5732 (simplify
5733 (cmp:c (bit_xor:c @0 @1) @0)
5734 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
5735
5736 /* (X & Y) == X becomes (X & ~Y) == 0. */
5737 (simplify
5738 (cmp:c (bit_and:c @0 @1) @0)
5739 (cmp (bit_and @0 (bit_not! @1)) { build_zero_cst (TREE_TYPE (@0)); }))
5740 (simplify
5741 (cmp:c (convert@3 (bit_and (convert@2 @0) INTEGER_CST@1)) (convert @0))
5742 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5743 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
5744 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
5745 && TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@0))
5746 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@2))
5747 && !wi::neg_p (wi::to_wide (@1)))
5748 (cmp (bit_and @0 (convert (bit_not @1)))
5749 { build_zero_cst (TREE_TYPE (@0)); })))
5750
5751 /* (X | Y) == Y becomes (X & ~Y) == 0. */
5752 (simplify
5753 (cmp:c (bit_ior:c @0 @1) @1)
5754 (cmp (bit_and @0 (bit_not! @1)) { build_zero_cst (TREE_TYPE (@0)); }))
5755
5756 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
5757 (simplify
5758 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
5759 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
5760 (cmp @0 (bit_xor @1 (convert @2)))))
5761
5762 (simplify
5763 (cmp (convert? addr@0) integer_zerop)
5764 (if (tree_single_nonzero_warnv_p (@0, NULL))
5765 { constant_boolean_node (cmp == NE_EXPR, type); }))
5766
5767 /* (X & C) op (Y & C) into (X ^ Y) & C op 0. */
5768 (simplify
5769 (cmp (bit_and:cs @0 @2) (bit_and:cs @1 @2))
5770 (cmp (bit_and (bit_xor @0 @1) @2) { build_zero_cst (TREE_TYPE (@2)); })))
5771
5772 /* (X < 0) != (Y < 0) into (X ^ Y) < 0.
5773 (X >= 0) != (Y >= 0) into (X ^ Y) < 0.
5774 (X < 0) == (Y < 0) into (X ^ Y) >= 0.
5775 (X >= 0) == (Y >= 0) into (X ^ Y) >= 0. */
5776 (for cmp (eq ne)
5777 ncmp (ge lt)
5778 (for sgncmp (ge lt)
5779 (simplify
5780 (cmp (sgncmp @0 integer_zerop@2) (sgncmp @1 integer_zerop))
5781 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
5782 && !TYPE_UNSIGNED (TREE_TYPE (@0))
5783 && types_match (@0, @1))
5784 (ncmp (bit_xor @0 @1) @2)))))
5785 /* (X < 0) == (Y >= 0) into (X ^ Y) < 0.
5786 (X < 0) != (Y >= 0) into (X ^ Y) >= 0. */
5787 (for cmp (eq ne)
5788 ncmp (lt ge)
5789 (simplify
5790 (cmp:c (lt @0 integer_zerop@2) (ge @1 integer_zerop))
5791 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
5792 && !TYPE_UNSIGNED (TREE_TYPE (@0))
5793 && types_match (@0, @1))
5794 (ncmp (bit_xor @0 @1) @2))))
5795
5796 /* If we have (A & C) == C where C is a power of 2, convert this into
5797 (A & C) != 0. Similarly for NE_EXPR. */
5798 (for cmp (eq ne)
5799 icmp (ne eq)
5800 (simplify
5801 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
5802 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
5803
5804 #if GIMPLE
5805 /* From fold_binary_op_with_conditional_arg handle the case of
5806 rewriting (a ? b : c) > d to a ? (b > d) : (c > d) when the
5807 compares simplify. */
5808 (for cmp (simple_comparison)
5809 (simplify
5810 (cmp:c (cond @0 @1 @2) @3)
5811 /* Do not move possibly trapping operations into the conditional as this
5812 pessimizes code and causes gimplification issues when applied late. */
5813 (if (!FLOAT_TYPE_P (TREE_TYPE (@3))
5814 || !operation_could_trap_p (cmp, true, false, @3))
5815 (cond @0 (cmp! @1 @3) (cmp! @2 @3)))))
5816 #endif
5817
5818 (for cmp (ge lt)
5819 /* x < 0 ? ~y : y into (x >> (prec-1)) ^ y. */
5820 /* x >= 0 ? ~y : y into ~((x >> (prec-1)) ^ y). */
5821 (simplify
5822 (cond (cmp @0 integer_zerop) (bit_not @1) @1)
5823 (if (INTEGRAL_TYPE_P (type)
5824 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
5825 && !TYPE_UNSIGNED (TREE_TYPE (@0))
5826 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
5827 (with
5828 {
5829 tree shifter = build_int_cst (integer_type_node, TYPE_PRECISION (type) - 1);
5830 }
5831 (if (cmp == LT_EXPR)
5832 (bit_xor (convert (rshift @0 {shifter;})) @1)
5833 (bit_not (bit_xor (convert (rshift @0 {shifter;})) @1))))))
5834 /* x < 0 ? y : ~y into ~((x >> (prec-1)) ^ y). */
5835 /* x >= 0 ? y : ~y into (x >> (prec-1)) ^ y. */
5836 (simplify
5837 (cond (cmp @0 integer_zerop) @1 (bit_not @1))
5838 (if (INTEGRAL_TYPE_P (type)
5839 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
5840 && !TYPE_UNSIGNED (TREE_TYPE (@0))
5841 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
5842 (with
5843 {
5844 tree shifter = build_int_cst (integer_type_node, TYPE_PRECISION (type) - 1);
5845 }
5846 (if (cmp == GE_EXPR)
5847 (bit_xor (convert (rshift @0 {shifter;})) @1)
5848 (bit_not (bit_xor (convert (rshift @0 {shifter;})) @1)))))))
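
/* Illustrative C sketch of the first case above, assuming 32-bit int
   and an arithmetic right shift for signed types (as GCC provides):

     int before (int x, int y) { return x < 0 ? ~y : y; }
     int after (int x, int y) { return (x >> 31) ^ y; }

   x >> 31 is 0 for non-negative x and all ones for negative x, so the
   XOR yields y or ~y respectively.  */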
5849
5850 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
5851 convert this into a shift followed by ANDing with D. */
5852 (simplify
5853 (cond
5854 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
5855 INTEGER_CST@2 integer_zerop)
5856 (if (!POINTER_TYPE_P (type) && integer_pow2p (@2))
5857 (with {
5858 int shift = (wi::exact_log2 (wi::to_wide (@2))
5859 - wi::exact_log2 (wi::to_wide (@1)));
5860 }
5861 (if (shift > 0)
5862 (bit_and
5863 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
5864 (bit_and
5865 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
5866 @2)))))
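
/* Illustrative C sketch of the shift-and-mask rewrite above, with
   C = 4 and D = 16 (the selected bit moves up by two positions):

     unsigned before (unsigned a) { return (a & 4u) != 0 ? 16u : 0u; }
     unsigned after (unsigned a) { return (a << 2) & 16u; }

   Bit 2 of a is shifted up to bit 4 and masked with D, giving 16
   exactly when bit 2 was set; for D smaller than C the shift goes to
   the right instead.  */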
5867
5868 /* If we have (A & C) != 0 where C is the sign bit of A, convert
5869 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
5870 (for cmp (eq ne)
5871 ncmp (ge lt)
5872 (simplify
5873 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
5874 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5875 && type_has_mode_precision_p (TREE_TYPE (@0))
5876 && element_precision (@2) >= element_precision (@0)
5877 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
5878 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
5879 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
5880
5881 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
5882 this into a right shift or sign extension followed by ANDing with C. */
5883 (simplify
5884 (cond
5885 (lt @0 integer_zerop)
5886 INTEGER_CST@1 integer_zerop)
5887 (if (integer_pow2p (@1)
5888 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
5889 (with {
5890 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
5891 }
5892 (if (shift >= 0)
5893 (bit_and
5894 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
5895 @1)
5896 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
5897 sign extension followed by AND with C will achieve the effect. */
5898 (bit_and (convert @0) @1)))))
5899
5900 /* When the addresses are not directly of decls compare base and offset.
5901 This implements some remaining parts of fold_comparison address
5902 comparisons but still no complete part of it. Still it is good
5903 enough to make fold_stmt not regress when not dispatching to fold_binary. */
5904 (for cmp (simple_comparison)
5905 (simplify
5906 (cmp (convert1?@2 addr@0) (convert2? addr@1))
5907 (with
5908 {
5909 poly_int64 off0, off1;
5910 tree base0, base1;
5911 int equal = address_compare (cmp, TREE_TYPE (@2), @0, @1, base0, base1,
5912 off0, off1, GENERIC);
5913 }
5914 (if (equal == 1)
5915 (switch
5916 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
5917 { constant_boolean_node (known_eq (off0, off1), type); })
5918 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
5919 { constant_boolean_node (known_ne (off0, off1), type); })
5920 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
5921 { constant_boolean_node (known_lt (off0, off1), type); })
5922 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
5923 { constant_boolean_node (known_le (off0, off1), type); })
5924 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
5925 { constant_boolean_node (known_ge (off0, off1), type); })
5926 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
5927 { constant_boolean_node (known_gt (off0, off1), type); }))
5928 (if (equal == 0)
5929 (switch
5930 (if (cmp == EQ_EXPR)
5931 { constant_boolean_node (false, type); })
5932 (if (cmp == NE_EXPR)
5933 { constant_boolean_node (true, type); })))))))
5934
5935 /* Simplify pointer equality compares using PTA. */
5936 (for neeq (ne eq)
5937 (simplify
5938 (neeq @0 @1)
5939 (if (POINTER_TYPE_P (TREE_TYPE (@0))
5940 && ptrs_compare_unequal (@0, @1))
5941 { constant_boolean_node (neeq != EQ_EXPR, type); })))
5942
5943 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
5944 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
5945 Disable the transform if either operand is a pointer to a function.
5946 This broke pr22051-2.c for arm where function pointer
5947 canonicalization is not wanted.  */
5948
5949 (for cmp (ne eq)
5950 (simplify
5951 (cmp (convert @0) INTEGER_CST@1)
5952 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
5953 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
5954 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
5955 /* Don't perform this optimization in GENERIC if @0 has reference
5956 type when sanitizing. See PR101210. */
5957 && !(GENERIC
5958 && TREE_CODE (TREE_TYPE (@0)) == REFERENCE_TYPE
5959 && (flag_sanitize & (SANITIZE_NULL | SANITIZE_ALIGNMENT))))
5960 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5961 && POINTER_TYPE_P (TREE_TYPE (@1))
5962 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
5963 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
5964 (cmp @0 (convert @1)))))
5965
5966 /* Non-equality compare simplifications from fold_binary */
5967 (for cmp (lt gt le ge)
5968 /* Comparisons with the highest or lowest possible integer of
5969 the specified precision will have known values. */
5970 (simplify
5971 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
5972 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
5973 || POINTER_TYPE_P (TREE_TYPE (@1))
5974 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
5975 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
5976 (with
5977 {
5978 tree cst = uniform_integer_cst_p (@1);
5979 tree arg1_type = TREE_TYPE (cst);
5980 unsigned int prec = TYPE_PRECISION (arg1_type);
5981 wide_int max = wi::max_value (arg1_type);
5982 wide_int signed_max = wi::max_value (prec, SIGNED);
5983 wide_int min = wi::min_value (arg1_type);
5984 }
5985 (switch
5986 (if (wi::to_wide (cst) == max)
5987 (switch
5988 (if (cmp == GT_EXPR)
5989 { constant_boolean_node (false, type); })
5990 (if (cmp == GE_EXPR)
5991 (eq @2 @1))
5992 (if (cmp == LE_EXPR)
5993 { constant_boolean_node (true, type); })
5994 (if (cmp == LT_EXPR)
5995 (ne @2 @1))))
5996 (if (wi::to_wide (cst) == min)
5997 (switch
5998 (if (cmp == LT_EXPR)
5999 { constant_boolean_node (false, type); })
6000 (if (cmp == LE_EXPR)
6001 (eq @2 @1))
6002 (if (cmp == GE_EXPR)
6003 { constant_boolean_node (true, type); })
6004 (if (cmp == GT_EXPR)
6005 (ne @2 @1))))
6006 (if (wi::to_wide (cst) == max - 1)
6007 (switch
6008 (if (cmp == GT_EXPR)
6009 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
6010 wide_int_to_tree (TREE_TYPE (cst),
6011 wi::to_wide (cst)
6012 + 1)); }))
6013 (if (cmp == LE_EXPR)
6014 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
6015 wide_int_to_tree (TREE_TYPE (cst),
6016 wi::to_wide (cst)
6017 + 1)); }))))
6018 (if (wi::to_wide (cst) == min + 1)
6019 (switch
6020 (if (cmp == GE_EXPR)
6021 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
6022 wide_int_to_tree (TREE_TYPE (cst),
6023 wi::to_wide (cst)
6024 - 1)); }))
6025 (if (cmp == LT_EXPR)
6026 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
6027 wide_int_to_tree (TREE_TYPE (cst),
6028 wi::to_wide (cst)
6029 - 1)); }))))
6030 (if (wi::to_wide (cst) == signed_max
6031 && TYPE_UNSIGNED (arg1_type)
6032 /* We will flip the signedness of the comparison operator
6033 associated with the mode of @1, so the sign bit is
6034 specified by this mode. Check that @1 is the signed
6035 max associated with this sign bit. */
6036 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
6037 /* signed_type does not work on pointer types. */
6038 && INTEGRAL_TYPE_P (arg1_type))
6039 /* The following case also applies to X < signed_max+1
6040 and X >= signed_max+1 because of previous transformations.  */
6041 (if (cmp == LE_EXPR || cmp == GT_EXPR)
6042 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
6043 (switch
6044 (if (cst == @1 && cmp == LE_EXPR)
6045 (ge (convert:st @0) { build_zero_cst (st); }))
6046 (if (cst == @1 && cmp == GT_EXPR)
6047 (lt (convert:st @0) { build_zero_cst (st); }))
6048 (if (cmp == LE_EXPR)
6049 (ge (view_convert:st @0) { build_zero_cst (st); }))
6050 (if (cmp == GT_EXPR)
6051 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
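
/* Worked examples of the boundary comparisons above, for 32-bit types:

     unsigned x:   x <  0xffffffffu  ->  x != 0xffffffffu
                   x >= 0xffffffffu  ->  x == 0xffffffffu
                   x <= 0x7fffffffu  ->  (int) x >= 0
                   x >  0x7fffffffu  ->  (int) x <  0
     int y:        y <= 0x7fffffff   ->  1  */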
6052
6053 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
6054 /* If the second operand is NaN, the result is constant. */
6055 (simplify
6056 (cmp @0 REAL_CST@1)
6057 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
6058 && (cmp != LTGT_EXPR || ! flag_trapping_math))
6059 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
6060 ? false : true, type); })))
6061
6062 /* Fold UNORDERED if either operand must be NaN, or neither can be. */
6063 (simplify
6064 (unordered @0 @1)
6065 (switch
6066 (if (tree_expr_nan_p (@0) || tree_expr_nan_p (@1))
6067 { constant_boolean_node (true, type); })
6068 (if (!tree_expr_maybe_nan_p (@0) && !tree_expr_maybe_nan_p (@1))
6069 { constant_boolean_node (false, type); })))
6070
6071 /* Fold ORDERED if either operand must be NaN, or neither can be. */
6072 (simplify
6073 (ordered @0 @1)
6074 (switch
6075 (if (tree_expr_nan_p (@0) || tree_expr_nan_p (@1))
6076 { constant_boolean_node (false, type); })
6077 (if (!tree_expr_maybe_nan_p (@0) && !tree_expr_maybe_nan_p (@1))
6078 { constant_boolean_node (true, type); })))
6079
6080 /* bool_var != 0 becomes bool_var. */
6081 (simplify
6082 (ne @0 integer_zerop)
6083 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
6084 && types_match (type, TREE_TYPE (@0)))
6085 (non_lvalue @0)))
6086 /* bool_var == 1 becomes bool_var. */
6087 (simplify
6088 (eq @0 integer_onep)
6089 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
6090 && types_match (type, TREE_TYPE (@0)))
6091 (non_lvalue @0)))
6092 /* Do not handle
6093 bool_var == 0 becomes !bool_var or
6094 bool_var != 1 becomes !bool_var
6095 here because that is only good in assignment context; as long
6096 as we require a tcc_comparison in GIMPLE_CONDs we would
6097 replace if (x == 0) with tem = ~x; if (tem != 0), which is
6098 clearly less optimal and which we'll transform again in forwprop.  */
6099
6100 /* Transform comparisons of the form (X & Y) CMP 0 to X CMP2 Z
6101 where ~Y + 1 == pow2 and Z = ~Y. */
6102 (for cst (VECTOR_CST INTEGER_CST)
6103 (for cmp (eq ne)
6104 icmp (le gt)
6105 (simplify
6106 (cmp (bit_and:c@2 @0 cst@1) integer_zerop)
6107 (with { tree csts = bitmask_inv_cst_vector_p (@1); }
6108 (if (csts && (VECTOR_TYPE_P (TREE_TYPE (@1)) || single_use (@2)))
6109 (with { auto optab = VECTOR_TYPE_P (TREE_TYPE (@1))
6110 ? optab_vector : optab_default;
6111 tree utype = unsigned_type_for (TREE_TYPE (@1)); }
6112 (if (target_supports_op_p (utype, icmp, optab)
6113 || (optimize_vectors_before_lowering_p ()
6114 && (!target_supports_op_p (type, cmp, optab)
6115 || !target_supports_op_p (type, BIT_AND_EXPR, optab))))
6116 (if (TYPE_UNSIGNED (TREE_TYPE (@1)))
6117 (icmp @0 { csts; })
6118 (icmp (view_convert:utype @0) { csts; })))))))))
6119
6120 /* When one argument is a constant, overflow detection can be simplified.
6121 Currently restricted to single use so as not to interfere too much with
6122 ADD_OVERFLOW detection in tree-ssa-math-opts.cc.
6123 CONVERT?(CONVERT?(A) + CST) CMP A -> A CMP' CST' */
6124 (for cmp (lt le ge gt)
6125 out (gt gt le le)
6126 (simplify
6127 (cmp:c (convert?@3 (plus@2 (convert?@4 @0) INTEGER_CST@1)) @0)
6128 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@2))
6129 && types_match (TREE_TYPE (@0), TREE_TYPE (@3))
6130 && tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@0))
6131 && wi::to_wide (@1) != 0
6132 && single_use (@2))
6133 (with {
6134 unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0));
6135 signop sign = TYPE_SIGN (TREE_TYPE (@0));
6136 }
6137 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
6138 wi::max_value (prec, sign)
6139 - wi::to_wide (@1)); })))))
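
/* Illustrative C sketch (not part of the pattern set; function names are
   made up): with wrapping unsigned arithmetic the two functions below are
   equivalent, which is the identity the pattern above implements for
   CST == 42:

     #include <limits.h>

     int wraps_long_form (unsigned x) { return x + 42u < x; }
     int wraps_short_form (unsigned x) { return x > UINT_MAX - 42u; }  */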
6140
6141 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
6142 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.cc
6143 expects the long form, so we restrict the transformation for now. */
6144 (for cmp (gt le)
6145 (simplify
6146 (cmp:c (minus@2 @0 @1) @0)
6147 (if (single_use (@2)
6148 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6149 && TYPE_UNSIGNED (TREE_TYPE (@0)))
6150 (cmp @1 @0))))
6151
6152 /* Optimize A - B + -1 >= A into B >= A for unsigned comparisons. */
6153 (for cmp (ge lt)
6154 (simplify
6155 (cmp:c (plus (minus @0 @1) integer_minus_onep) @0)
6156 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
6157 && TYPE_UNSIGNED (TREE_TYPE (@0)))
6158 (cmp @1 @0))))
6159
6160 /* Testing for overflow is unnecessary if we already know the result. */
6161 /* A - B > A */
6162 (for cmp (gt le)
6163 out (ne eq)
6164 (simplify
6165 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
6166 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
6167 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
6168 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
6169 /* A + B < A */
6170 (for cmp (lt ge)
6171 out (ne eq)
6172 (simplify
6173 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
6174 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
6175 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
6176 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
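
/* A hedged source-level sketch of the redundant check this removes
   (function name is illustrative only): once the subtraction is done via
   .SUB_OVERFLOW, comparing the result against the first operand just
   duplicates the overflow flag.

     #include <stdbool.h>

     bool redundant_check (unsigned a, unsigned b)
     {
       unsigned r;
       bool ovf = __builtin_sub_overflow (a, b, &r);
       return r > a;   // folded to plain "ovf"
     }  */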
6177
6178 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
6179 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
6180 (for cmp (lt ge)
6181 out (ne eq)
6182 (simplify
6183 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
6184 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
6185 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
6186 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
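
/* Source-level sketch of the identity (illustrative only; assumes b != 0
   so the division is well defined): for unsigned a and b, a * b overflows
   exactly when -1u / b < a, i.e. when a > UINT_MAX / b.

     #include <stdbool.h>

     bool mul_wraps (unsigned a, unsigned b)   // requires b != 0
     {
       return (unsigned) -1 / b < a;           // becomes a .MUL_OVERFLOW flag test
     }  */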
6187
6188 /* Similarly, for unsigned operands, (((type) A * B) >> prec) != 0 where type
6189 is at least twice as wide as type of A and B, simplify to
6190 __builtin_mul_overflow (A, B, <unused>). */
6191 (for cmp (eq ne)
6192 (simplify
6193 (cmp (rshift (mult:s (convert@3 @0) (convert @1)) INTEGER_CST@2)
6194 integer_zerop)
6195 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6196 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
6197 && TYPE_UNSIGNED (TREE_TYPE (@0))
6198 && (TYPE_PRECISION (TREE_TYPE (@3))
6199 >= 2 * TYPE_PRECISION (TREE_TYPE (@0)))
6200 && tree_fits_uhwi_p (@2)
6201 && tree_to_uhwi (@2) == TYPE_PRECISION (TREE_TYPE (@0))
6202 && types_match (@0, @1)
6203 && type_has_mode_precision_p (TREE_TYPE (@0))
6204 && (optab_handler (umulv4_optab, TYPE_MODE (TREE_TYPE (@0)))
6205 != CODE_FOR_nothing))
6206 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
6207 (cmp (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
6208
6209 /* Demote operands of IFN_{ADD,SUB,MUL}_OVERFLOW. */
6210 (for ovf (IFN_ADD_OVERFLOW IFN_SUB_OVERFLOW IFN_MUL_OVERFLOW)
6211 (simplify
6212 (ovf (convert@2 @0) @1)
6213 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6214 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
6215 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
6216 && (!TYPE_UNSIGNED (TREE_TYPE (@2)) || TYPE_UNSIGNED (TREE_TYPE (@0))))
6217 (ovf @0 @1)))
6218 (simplify
6219 (ovf @1 (convert@2 @0))
6220 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6221 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
6222 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
6223 && (!TYPE_UNSIGNED (TREE_TYPE (@2)) || TYPE_UNSIGNED (TREE_TYPE (@0))))
6224 (ovf @1 @0))))
6225
6226 /* Optimize __builtin_mul_overflow_p (x, cst, (utype) 0) if all 3 types
6227 are unsigned to x > (umax / cst). Similarly for a signed type, but
6228 in that case x needs to be outside of a range.
6229 (simplify
6230 (imagpart (IFN_MUL_OVERFLOW:cs@2 @0 integer_nonzerop@1))
6231 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6232 && TYPE_MAX_VALUE (TREE_TYPE (@0))
6233 && types_match (TREE_TYPE (@0), TREE_TYPE (TREE_TYPE (@2)))
6234 && int_fits_type_p (@1, TREE_TYPE (@0)))
6235 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
6236 (convert (gt @0 (trunc_div! { TYPE_MAX_VALUE (TREE_TYPE (@0)); } @1)))
6237 (if (TYPE_MIN_VALUE (TREE_TYPE (@0)))
6238 (if (integer_minus_onep (@1))
6239 (convert (eq @0 { TYPE_MIN_VALUE (TREE_TYPE (@0)); }))
6240 (with
6241 {
6242 tree div = fold_convert (TREE_TYPE (@0), @1);
6243 tree lo = int_const_binop (TRUNC_DIV_EXPR,
6244 TYPE_MIN_VALUE (TREE_TYPE (@0)), div);
6245 tree hi = int_const_binop (TRUNC_DIV_EXPR,
6246 TYPE_MAX_VALUE (TREE_TYPE (@0)), div);
6247 tree etype = range_check_type (TREE_TYPE (@0));
6248 if (etype)
6249 {
6250 if (wi::neg_p (wi::to_wide (div)))
6251 std::swap (lo, hi);
6252 lo = fold_convert (etype, lo);
6253 hi = fold_convert (etype, hi);
6254 hi = int_const_binop (MINUS_EXPR, hi, lo);
6255 }
6256 }
6257 (if (etype)
6258 (convert (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
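
/* For example, with 32-bit unsigned int and cst == 1000 the two functions
   below are equivalent, which is what the unsigned arm of the pattern
   produces (sketch; function names are made up):

     #include <limits.h>
     #include <stdbool.h>

     bool ovf_builtin (unsigned x) { return __builtin_mul_overflow_p (x, 1000u, 0u); }
     bool ovf_compare (unsigned x) { return x > UINT_MAX / 1000u; }   // 4294967  */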
6259
6260 /* Simplification of math builtins. These rules must all be optimizations
6261 as well as IL simplifications. If there is a possibility that the new
6262 form could be a pessimization, the rule should go in the canonicalization
6263 section that follows this one.
6264
6265 Rules can generally go in this section if they satisfy one of
6266 the following:
6267
6268 - the rule describes an identity
6269
6270 - the rule replaces calls with something as simple as addition or
6271 multiplication
6272
6273 - the rule contains unary calls only and simplifies the surrounding
6274 arithmetic. (The idea here is to exclude non-unary calls in which
6275 one operand is constant and in which the call is known to be cheap
6276 when the operand has that value.) */
6277
6278 (if (flag_unsafe_math_optimizations)
6279 /* Simplify sqrt(x) * sqrt(x) -> x. */
6280 (simplify
6281 (mult (SQRT_ALL@1 @0) @1)
6282 (if (!tree_expr_maybe_signaling_nan_p (@0))
6283 @0))
6284
6285 (for op (plus minus)
6286 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
6287 (simplify
6288 (op (rdiv @0 @1)
6289 (rdiv @2 @1))
6290 (rdiv (op @0 @2) @1)))
6291
6292 (for cmp (lt le gt ge)
6293 neg_cmp (gt ge lt le)
6294 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
6295 (simplify
6296 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
6297 (with
6298 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
6299 (if (tem
6300 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
6301 || (real_zerop (tem) && !real_zerop (@1))))
6302 (switch
6303 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
6304 (cmp @0 { tem; }))
6305 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
6306 (neg_cmp @0 { tem; })))))))
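
/* E.g. under -funsafe-math-optimizations "x * 2.0 < 10.0" becomes
   "x < 5.0" and "x * -2.0 < 10.0" becomes "x > -5.0", provided the folded
   constant is finite and not a spurious zero (sketch):

     int f1 (double x) { return x * 2.0 < 10.0; }    // -> x < 5.0
     int f2 (double x) { return x * -2.0 < 10.0; }   // -> x > -5.0  */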
6307
6308 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
6309 (for root (SQRT CBRT)
6310 (simplify
6311 (mult (root:s @0) (root:s @1))
6312 (root (mult @0 @1))))
6313
6314 /* Simplify expN(x) * expN(y) -> expN(x+y). */
6315 (for exps (EXP EXP2 EXP10 POW10)
6316 (simplify
6317 (mult (exps:s @0) (exps:s @1))
6318 (exps (plus @0 @1))))
6319
6320 /* Simplify a/root(b/c) into a*root(c/b). */
6321 (for root (SQRT CBRT)
6322 (simplify
6323 (rdiv @0 (root:s (rdiv:s @1 @2)))
6324 (mult @0 (root (rdiv @2 @1)))))
6325
6326 /* Simplify x/expN(y) into x*expN(-y). */
6327 (for exps (EXP EXP2 EXP10 POW10)
6328 (simplify
6329 (rdiv @0 (exps:s @1))
6330 (mult @0 (exps (negate @1)))))
6331
6332 (for logs (LOG LOG2 LOG10 LOG10)
6333 exps (EXP EXP2 EXP10 POW10)
6334 /* logN(expN(x)) -> x. */
6335 (simplify
6336 (logs (exps @0))
6337 @0)
6338 /* expN(logN(x)) -> x. */
6339 (simplify
6340 (exps (logs @0))
6341 @0))
6342
6343 /* Optimize logN(func()) for various exponential functions. We
6344 want to determine the value "x" and the power "exponent" in
6345 order to transform logN(x**exponent) into exponent*logN(x). */
6346 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
6347 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
6348 (simplify
6349 (logs (exps @0))
6350 (if (SCALAR_FLOAT_TYPE_P (type))
6351 (with {
6352 tree x;
6353 switch (exps)
6354 {
6355 CASE_CFN_EXP:
6356 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
6357 x = build_real_truncate (type, dconst_e ());
6358 break;
6359 CASE_CFN_EXP2:
6360 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
6361 x = build_real (type, dconst2);
6362 break;
6363 CASE_CFN_EXP10:
6364 CASE_CFN_POW10:
6365 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
6366 {
6367 REAL_VALUE_TYPE dconst10;
6368 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
6369 x = build_real (type, dconst10);
6370 }
6371 break;
6372 default:
6373 gcc_unreachable ();
6374 }
6375 }
6376 (mult (logs { x; }) @0)))))
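
/* E.g. log2 (exp (x)) becomes x * log2 (e) and log (exp10 (x)) becomes
   x * log (10), with the logN of the constant folded further.  Sketch
   (requires -funsafe-math-optimizations; M_LOG2E is the usual <math.h>
   constant for log2 (e)):

     #include <math.h>

     double f (double x) { return log2 (exp (x)); }   // -> x * M_LOG2E  */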
6377
6378 (for logs (LOG LOG
6379 LOG2 LOG2
6380 LOG10 LOG10)
6381 exps (SQRT CBRT)
6382 (simplify
6383 (logs (exps @0))
6384 (if (SCALAR_FLOAT_TYPE_P (type))
6385 (with {
6386 tree x;
6387 switch (exps)
6388 {
6389 CASE_CFN_SQRT:
6390 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
6391 x = build_real (type, dconsthalf);
6392 break;
6393 CASE_CFN_CBRT:
6394 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
6395 x = build_real_truncate (type, dconst_third ());
6396 break;
6397 default:
6398 gcc_unreachable ();
6399 }
6400 }
6401 (mult { x; } (logs @0))))))
6402
6403 /* logN(pow(x,exponent)) -> exponent*logN(x). */
6404 (for logs (LOG LOG2 LOG10)
6405 pows (POW)
6406 (simplify
6407 (logs (pows @0 @1))
6408 (mult @1 (logs @0))))
6409
6410 /* pow(C,x) -> exp(log(C)*x) if C > 0,
6411 or if C is a positive power of 2,
6412 pow(C,x) -> exp2(log2(C)*x). */
6413 #if GIMPLE
6414 (for pows (POW)
6415 exps (EXP)
6416 logs (LOG)
6417 exp2s (EXP2)
6418 log2s (LOG2)
6419 (simplify
6420 (pows REAL_CST@0 @1)
6421 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
6422 && real_isfinite (TREE_REAL_CST_PTR (@0))
6423 /* As libmvec doesn't have a vectorized exp2, defer optimizing
6424 the use_exp2 case until after vectorization. It seems actually
6425 beneficial for all constants to postpone this until later,
6426 because exp(log(C)*x), while faster, will have worse precision
6427 and if x folds into a constant too, that is unnecessary
6428 pessimization. */
6429 && canonicalize_math_after_vectorization_p ())
6430 (with {
6431 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
6432 bool use_exp2 = false;
6433 if (targetm.libc_has_function (function_c99_misc, TREE_TYPE (@0))
6434 && value->cl == rvc_normal)
6435 {
6436 REAL_VALUE_TYPE frac_rvt = *value;
6437 SET_REAL_EXP (&frac_rvt, 1);
6438 if (real_equal (&frac_rvt, &dconst1))
6439 use_exp2 = true;
6440 }
6441 }
6442 (if (!use_exp2)
6443 (if (optimize_pow_to_exp (@0, @1))
6444 (exps (mult (logs @0) @1)))
6445 (exp2s (mult (log2s @0) @1)))))))
6446 #endif
6447
6448 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
6449 (for pows (POW)
6450 exps (EXP EXP2 EXP10 POW10)
6451 logs (LOG LOG2 LOG10 LOG10)
6452 (simplify
6453 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
6454 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
6455 && real_isfinite (TREE_REAL_CST_PTR (@0)))
6456 (exps (plus (mult (logs @0) @1) @2)))))
6457
6458 (for sqrts (SQRT)
6459 cbrts (CBRT)
6460 pows (POW)
6461 exps (EXP EXP2 EXP10 POW10)
6462 /* sqrt(expN(x)) -> expN(x*0.5). */
6463 (simplify
6464 (sqrts (exps @0))
6465 (exps (mult @0 { build_real (type, dconsthalf); })))
6466 /* cbrt(expN(x)) -> expN(x/3). */
6467 (simplify
6468 (cbrts (exps @0))
6469 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
6470 /* pow(expN(x), y) -> expN(x*y). */
6471 (simplify
6472 (pows (exps @0) @1)
6473 (exps (mult @0 @1))))
6474
6475 /* tan(atan(x)) -> x. */
6476 (for tans (TAN)
6477 atans (ATAN)
6478 (simplify
6479 (tans (atans @0))
6480 @0)))
6481
6482 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
6483 (for sins (SIN)
6484 atans (ATAN)
6485 sqrts (SQRT)
6486 copysigns (COPYSIGN)
6487 (simplify
6488 (sins (atans:s @0))
6489 (with
6490 {
6491 REAL_VALUE_TYPE r_cst;
6492 build_sinatan_real (&r_cst, type);
6493 tree t_cst = build_real (type, r_cst);
6494 tree t_one = build_one_cst (type);
6495 }
6496 (if (SCALAR_FLOAT_TYPE_P (type))
6497 (cond (lt (abs @0) { t_cst; })
6498 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
6499 (copysigns { t_one; } @0))))))
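
/* A quick numeric sanity check of the identity used above, purely
   illustrative (for large |x| the pattern instead returns copysign (1, x)
   to avoid overflow in x*x):

     #include <math.h>
     #include <stdio.h>

     int main (void)
     {
       double x = 0.75;
       printf ("%g %g\n", sin (atan (x)), x / sqrt (x * x + 1.0));
       return 0;
     }  */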
6500
6501 /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
6502 (for coss (COS)
6503 atans (ATAN)
6504 sqrts (SQRT)
6505 copysigns (COPYSIGN)
6506 (simplify
6507 (coss (atans:s @0))
6508 (with
6509 {
6510 REAL_VALUE_TYPE r_cst;
6511 build_sinatan_real (&r_cst, type);
6512 tree t_cst = build_real (type, r_cst);
6513 tree t_one = build_one_cst (type);
6514 tree t_zero = build_zero_cst (type);
6515 }
6516 (if (SCALAR_FLOAT_TYPE_P (type))
6517 (cond (lt (abs @0) { t_cst; })
6518 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
6519 (copysigns { t_zero; } @0))))))
6520
6521 (if (!flag_errno_math)
6522 /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
6523 (for sinhs (SINH)
6524 atanhs (ATANH)
6525 sqrts (SQRT)
6526 (simplify
6527 (sinhs (atanhs:s @0))
6528 (with { tree t_one = build_one_cst (type); }
6529 (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
6530
6531 /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
6532 (for coshs (COSH)
6533 atanhs (ATANH)
6534 sqrts (SQRT)
6535 (simplify
6536 (coshs (atanhs:s @0))
6537 (with { tree t_one = build_one_cst (type); }
6538 (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
6539
6540 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
6541 (simplify
6542 (CABS (complex:C @0 real_zerop@1))
6543 (abs @0))
6544
6545 /* trunc(trunc(x)) -> trunc(x), etc. */
6546 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
6547 (simplify
6548 (fns (fns @0))
6549 (fns @0)))
6550 /* f(x) -> x if x is integer valued and f does nothing for such values. */
6551 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
6552 (simplify
6553 (fns integer_valued_real_p@0)
6554 @0))
6555
6556 /* hypot(x,0) and hypot(0,x) -> abs(x). */
6557 (simplify
6558 (HYPOT:c @0 real_zerop@1)
6559 (abs @0))
6560
6561 /* pow(1,x) -> 1. */
6562 (simplify
6563 (POW real_onep@0 @1)
6564 @0)
6565
6566 (simplify
6567 /* copysign(x,x) -> x. */
6568 (COPYSIGN_ALL @0 @0)
6569 @0)
6570
6571 (simplify
6572 /* copysign(x,-x) -> -x. */
6573 (COPYSIGN_ALL @0 (negate@1 @0))
6574 @1)
6575
6576 (simplify
6577 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
6578 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
6579 (abs @0))
6580
6581 (for scale (LDEXP SCALBN SCALBLN)
6582 /* ldexp(0, x) -> 0. */
6583 (simplify
6584 (scale real_zerop@0 @1)
6585 @0)
6586 /* ldexp(x, 0) -> x. */
6587 (simplify
6588 (scale @0 integer_zerop@1)
6589 @0)
6590 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
6591 (simplify
6592 (scale REAL_CST@0 @1)
6593 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
6594 @0)))
6595
6596 /* Canonicalization of sequences of math builtins. These rules represent
6597 IL simplifications but are not necessarily optimizations.
6598
6599 The sincos pass is responsible for picking "optimal" implementations
6600 of math builtins, which may be more complicated and can sometimes go
6601 the other way, e.g. converting pow into a sequence of sqrts.
6602 We only want to do these canonicalizations before the pass has run. */
6603
6604 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
6605 /* Simplify tan(x) * cos(x) -> sin(x). */
6606 (simplify
6607 (mult:c (TAN:s @0) (COS:s @0))
6608 (SIN @0))
6609
6610 /* Simplify x * pow(x,c) -> pow(x,c+1). */
6611 (simplify
6612 (mult:c @0 (POW:s @0 REAL_CST@1))
6613 (if (!TREE_OVERFLOW (@1))
6614 (POW @0 (plus @1 { build_one_cst (type); }))))
6615
6616 /* Simplify sin(x) / cos(x) -> tan(x). */
6617 (simplify
6618 (rdiv (SIN:s @0) (COS:s @0))
6619 (TAN @0))
6620
6621 /* Simplify sinh(x) / cosh(x) -> tanh(x). */
6622 (simplify
6623 (rdiv (SINH:s @0) (COSH:s @0))
6624 (TANH @0))
6625
6626 /* Simplify tanh (x) / sinh (x) -> 1.0 / cosh (x). */
6627 (simplify
6628 (rdiv (TANH:s @0) (SINH:s @0))
6629 (rdiv {build_one_cst (type);} (COSH @0)))
6630
6631 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
6632 (simplify
6633 (rdiv (COS:s @0) (SIN:s @0))
6634 (rdiv { build_one_cst (type); } (TAN @0)))
6635
6636 /* Simplify sin(x) / tan(x) -> cos(x). */
6637 (simplify
6638 (rdiv (SIN:s @0) (TAN:s @0))
6639 (if (! HONOR_NANS (@0)
6640 && ! HONOR_INFINITIES (@0))
6641 (COS @0)))
6642
6643 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
6644 (simplify
6645 (rdiv (TAN:s @0) (SIN:s @0))
6646 (if (! HONOR_NANS (@0)
6647 && ! HONOR_INFINITIES (@0))
6648 (rdiv { build_one_cst (type); } (COS @0))))
6649
6650 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
6651 (simplify
6652 (mult (POW:s @0 @1) (POW:s @0 @2))
6653 (POW @0 (plus @1 @2)))
6654
6655 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
6656 (simplify
6657 (mult (POW:s @0 @1) (POW:s @2 @1))
6658 (POW (mult @0 @2) @1))
6659
6660 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
6661 (simplify
6662 (mult (POWI:s @0 @1) (POWI:s @2 @1))
6663 (POWI (mult @0 @2) @1))
6664
6665 /* Simplify pow(x,c) / x -> pow(x,c-1). */
6666 (simplify
6667 (rdiv (POW:s @0 REAL_CST@1) @0)
6668 (if (!TREE_OVERFLOW (@1))
6669 (POW @0 (minus @1 { build_one_cst (type); }))))
6670
6671 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
6672 (simplify
6673 (rdiv @0 (POW:s @1 @2))
6674 (mult @0 (POW @1 (negate @2))))
6675
6676 (for sqrts (SQRT)
6677 cbrts (CBRT)
6678 pows (POW)
6679 /* sqrt(sqrt(x)) -> pow(x,1/4). */
6680 (simplify
6681 (sqrts (sqrts @0))
6682 (pows @0 { build_real (type, dconst_quarter ()); }))
6683 /* sqrt(cbrt(x)) -> pow(x,1/6). */
6684 (simplify
6685 (sqrts (cbrts @0))
6686 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
6687 /* cbrt(sqrt(x)) -> pow(x,1/6). */
6688 (simplify
6689 (cbrts (sqrts @0))
6690 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
6691 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
6692 (simplify
6693 (cbrts (cbrts tree_expr_nonnegative_p@0))
6694 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
6695 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
6696 (simplify
6697 (sqrts (pows @0 @1))
6698 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
6699 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
6700 (simplify
6701 (cbrts (pows tree_expr_nonnegative_p@0 @1))
6702 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
6703 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
6704 (simplify
6705 (pows (sqrts @0) @1)
6706 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
6707 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
6708 (simplify
6709 (pows (cbrts tree_expr_nonnegative_p@0) @1)
6710 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
6711 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
6712 (simplify
6713 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
6714 (pows @0 (mult @1 @2))))
6715
6716 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
6717 (simplify
6718 (CABS (complex @0 @0))
6719 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
6720
6721 /* hypot(x,x) -> fabs(x)*sqrt(2). */
6722 (simplify
6723 (HYPOT @0 @0)
6724 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
6725
6726 /* cexp(x+yi) -> exp(x)*cexpi(y). */
6727 (for cexps (CEXP)
6728 exps (EXP)
6729 cexpis (CEXPI)
6730 (simplify
6731 (cexps compositional_complex@0)
6732 (if (targetm.libc_has_function (function_c99_math_complex, TREE_TYPE (@0)))
6733 (complex
6734 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
6735 (mult @1 (imagpart @2)))))))
6736
6737 (if (canonicalize_math_p ())
6738 /* floor(x) -> trunc(x) if x is nonnegative. */
6739 (for floors (FLOOR_ALL)
6740 truncs (TRUNC_ALL)
6741 (simplify
6742 (floors tree_expr_nonnegative_p@0)
6743 (truncs @0))))
6744
6745 (match double_value_p
6746 @0
6747 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
6748 (for froms (BUILT_IN_TRUNCL
6749 BUILT_IN_FLOORL
6750 BUILT_IN_CEILL
6751 BUILT_IN_ROUNDL
6752 BUILT_IN_NEARBYINTL
6753 BUILT_IN_RINTL)
6754 tos (BUILT_IN_TRUNC
6755 BUILT_IN_FLOOR
6756 BUILT_IN_CEIL
6757 BUILT_IN_ROUND
6758 BUILT_IN_NEARBYINT
6759 BUILT_IN_RINT)
6760 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
6761 (if (optimize && canonicalize_math_p ())
6762 (simplify
6763 (froms (convert double_value_p@0))
6764 (convert (tos @0)))))
6765
6766 (match float_value_p
6767 @0
6768 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
6769 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
6770 BUILT_IN_FLOORL BUILT_IN_FLOOR
6771 BUILT_IN_CEILL BUILT_IN_CEIL
6772 BUILT_IN_ROUNDL BUILT_IN_ROUND
6773 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
6774 BUILT_IN_RINTL BUILT_IN_RINT)
6775 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
6776 BUILT_IN_FLOORF BUILT_IN_FLOORF
6777 BUILT_IN_CEILF BUILT_IN_CEILF
6778 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
6779 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
6780 BUILT_IN_RINTF BUILT_IN_RINTF)
6781 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
6782 if x is a float. */
6783 (if (optimize && canonicalize_math_p ()
6784 && targetm.libc_has_function (function_c99_misc, NULL_TREE))
6785 (simplify
6786 (froms (convert float_value_p@0))
6787 (convert (tos @0)))))
6788
6789 #if GIMPLE
6790 (match float16_value_p
6791 @0
6792 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float16_type_node)))
6793 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC BUILT_IN_TRUNCF
6794 BUILT_IN_FLOORL BUILT_IN_FLOOR BUILT_IN_FLOORF
6795 BUILT_IN_CEILL BUILT_IN_CEIL BUILT_IN_CEILF
6796 BUILT_IN_ROUNDEVENL BUILT_IN_ROUNDEVEN BUILT_IN_ROUNDEVENF
6797 BUILT_IN_ROUNDL BUILT_IN_ROUND BUILT_IN_ROUNDF
6798 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT BUILT_IN_NEARBYINTF
6799 BUILT_IN_RINTL BUILT_IN_RINT BUILT_IN_RINTF
6800 BUILT_IN_SQRTL BUILT_IN_SQRT BUILT_IN_SQRTF)
6801 tos (IFN_TRUNC IFN_TRUNC IFN_TRUNC
6802 IFN_FLOOR IFN_FLOOR IFN_FLOOR
6803 IFN_CEIL IFN_CEIL IFN_CEIL
6804 IFN_ROUNDEVEN IFN_ROUNDEVEN IFN_ROUNDEVEN
6805 IFN_ROUND IFN_ROUND IFN_ROUND
6806 IFN_NEARBYINT IFN_NEARBYINT IFN_NEARBYINT
6807 IFN_RINT IFN_RINT IFN_RINT
6808 IFN_SQRT IFN_SQRT IFN_SQRT)
6809 /* (_Float16) round ((double) x) -> __builtin_roundf16 (x), etc.,
6810 if x is a _Float16. */
6811 (simplify
6812 (convert (froms (convert float16_value_p@0)))
6813 (if (optimize
6814 && types_match (type, TREE_TYPE (@0))
6815 && direct_internal_fn_supported_p (as_internal_fn (tos),
6816 type, OPTIMIZE_FOR_BOTH))
6817 (tos @0))))
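
/* E.g. on a target with a direct _Float16 rounding instruction the usual
   promote-to-float idiom collapses to a single internal call (sketch;
   subject to the direct_internal_fn_supported_p check above):

     #include <math.h>

     _Float16 f (_Float16 h) { return (_Float16) roundf ((float) h); }
     // -> .ROUND (h)  */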
6818
6819 /* Simplify (trunc)copysign ((extend)x, (extend)y) to copysignf (x, y),
6820 where x and y are float values; similarly for _Float16/double. */
6821 (for copysigns (COPYSIGN_ALL)
6822 (simplify
6823 (convert (copysigns (convert@2 @0) (convert @1)))
6824 (if (optimize
6825 && !HONOR_SNANS (@2)
6826 && types_match (type, TREE_TYPE (@0))
6827 && types_match (type, TREE_TYPE (@1))
6828 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@2))
6829 && direct_internal_fn_supported_p (IFN_COPYSIGN,
6830 type, OPTIMIZE_FOR_BOTH))
6831 (IFN_COPYSIGN @0 @1))))
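
/* E.g. the promote-compute-demote idiom for copysign narrows back to the
   float width when the target provides IFN_COPYSIGN (sketch):

     #include <math.h>

     float f (float x, float y) { return (float) copysign ((double) x, (double) y); }
     // -> .COPYSIGN (x, y)  */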
6832
6833 (for froms (BUILT_IN_FMAF BUILT_IN_FMA BUILT_IN_FMAL)
6834 tos (IFN_FMA IFN_FMA IFN_FMA)
6835 (simplify
6836 (convert (froms (convert@3 @0) (convert @1) (convert @2)))
6837 (if (flag_unsafe_math_optimizations
6838 && optimize
6839 && FLOAT_TYPE_P (type)
6840 && FLOAT_TYPE_P (TREE_TYPE (@3))
6841 && types_match (type, TREE_TYPE (@0))
6842 && types_match (type, TREE_TYPE (@1))
6843 && types_match (type, TREE_TYPE (@2))
6844 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (@3))
6845 && direct_internal_fn_supported_p (as_internal_fn (tos),
6846 type, OPTIMIZE_FOR_BOTH))
6847 (tos @0 @1 @2))))
6848
6849 (for maxmin (max min)
6850 (simplify
6851 (convert (maxmin (convert@2 @0) (convert @1)))
6852 (if (optimize
6853 && FLOAT_TYPE_P (type)
6854 && FLOAT_TYPE_P (TREE_TYPE (@2))
6855 && types_match (type, TREE_TYPE (@0))
6856 && types_match (type, TREE_TYPE (@1))
6857 && element_precision (type) < element_precision (TREE_TYPE (@2)))
6858 (maxmin @0 @1))))
6859 #endif
6860
6861 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
6862 tos (XFLOOR XCEIL XROUND XRINT)
6863 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
6864 (if (optimize && canonicalize_math_p ())
6865 (simplify
6866 (froms (convert double_value_p@0))
6867 (tos @0))))
6868
6869 (for froms (XFLOORL XCEILL XROUNDL XRINTL
6870 XFLOOR XCEIL XROUND XRINT)
6871 tos (XFLOORF XCEILF XROUNDF XRINTF)
6872 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
6873 if x is a float. */
6874 (if (optimize && canonicalize_math_p ())
6875 (simplify
6876 (froms (convert float_value_p@0))
6877 (tos @0))))
6878
6879 (if (canonicalize_math_p ())
6880 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
6881 (for floors (IFLOOR LFLOOR LLFLOOR)
6882 (simplify
6883 (floors tree_expr_nonnegative_p@0)
6884 (fix_trunc @0))))
6885
6886 (if (canonicalize_math_p ())
6887 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
6888 (for fns (IFLOOR LFLOOR LLFLOOR
6889 ICEIL LCEIL LLCEIL
6890 IROUND LROUND LLROUND)
6891 (simplify
6892 (fns integer_valued_real_p@0)
6893 (fix_trunc @0)))
6894 (if (!flag_errno_math)
6895 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
6896 (for rints (IRINT LRINT LLRINT)
6897 (simplify
6898 (rints integer_valued_real_p@0)
6899 (fix_trunc @0)))))
6900
6901 (if (canonicalize_math_p ())
6902 (for ifn (IFLOOR ICEIL IROUND IRINT)
6903 lfn (LFLOOR LCEIL LROUND LRINT)
6904 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
6905 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
6906 sizeof (int) == sizeof (long). */
6907 (if (TYPE_PRECISION (integer_type_node)
6908 == TYPE_PRECISION (long_integer_type_node))
6909 (simplify
6910 (ifn @0)
6911 (lfn:long_integer_type_node @0)))
6912 /* Canonicalize llround (x) to lround (x) on LP64 targets where
6913 sizeof (long long) == sizeof (long). */
6914 (if (TYPE_PRECISION (long_long_integer_type_node)
6915 == TYPE_PRECISION (long_integer_type_node))
6916 (simplify
6917 (llfn @0)
6918 (lfn:long_integer_type_node @0)))))
6919
6920 /* cproj(x) -> x if we're ignoring infinities. */
6921 (simplify
6922 (CPROJ @0)
6923 (if (!HONOR_INFINITIES (type))
6924 @0))
6925
6926 /* If the real part is inf and the imag part is known to be
6927 nonnegative, return (inf + 0i). */
6928 (simplify
6929 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
6930 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
6931 { build_complex_inf (type, false); }))
6932
6933 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
6934 (simplify
6935 (CPROJ (complex @0 REAL_CST@1))
6936 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
6937 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
6938
6939 (for pows (POW)
6940 sqrts (SQRT)
6941 cbrts (CBRT)
6942 (simplify
6943 (pows @0 REAL_CST@1)
6944 (with {
6945 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
6946 REAL_VALUE_TYPE tmp;
6947 }
6948 (switch
6949 /* pow(x,0) -> 1. */
6950 (if (real_equal (value, &dconst0))
6951 { build_real (type, dconst1); })
6952 /* pow(x,1) -> x. */
6953 (if (real_equal (value, &dconst1))
6954 @0)
6955 /* pow(x,-1) -> 1/x. */
6956 (if (real_equal (value, &dconstm1))
6957 (rdiv { build_real (type, dconst1); } @0))
6958 /* pow(x,0.5) -> sqrt(x). */
6959 (if (flag_unsafe_math_optimizations
6960 && canonicalize_math_p ()
6961 && real_equal (value, &dconsthalf))
6962 (sqrts @0))
6963 /* pow(x,1/3) -> cbrt(x). */
6964 (if (flag_unsafe_math_optimizations
6965 && canonicalize_math_p ()
6966 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
6967 real_equal (value, &tmp)))
6968 (cbrts @0))))))
6969
6970 /* powi(1,x) -> 1. */
6971 (simplify
6972 (POWI real_onep@0 @1)
6973 @0)
6974
6975 (simplify
6976 (POWI @0 INTEGER_CST@1)
6977 (switch
6978 /* powi(x,0) -> 1. */
6979 (if (wi::to_wide (@1) == 0)
6980 { build_real (type, dconst1); })
6981 /* powi(x,1) -> x. */
6982 (if (wi::to_wide (@1) == 1)
6983 @0)
6984 /* powi(x,-1) -> 1/x. */
6985 (if (wi::to_wide (@1) == -1)
6986 (rdiv { build_real (type, dconst1); } @0))))
6987
6988 /* Narrowing of arithmetic and logical operations.
6989
6990 These are conceptually similar to the transformations performed for
6991 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
6992 term we want to move all that code out of the front-ends into here. */
6993
6994 /* Convert (outertype)((innertype0)a+(innertype1)b)
6995 into ((newtype)a+(newtype)b) where newtype
6996 is the widest mode from all of these. */
6997 (for op (plus minus mult rdiv)
6998 (simplify
6999 (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2)))
7000 /* If we have a narrowing conversion of an arithmetic operation where
7001 both operands are widening conversions from the same type as the outer
7002 narrowing conversion, then convert the innermost operands to a
7003 suitable unsigned type (to avoid introducing undefined behavior),
7004 perform the operation and convert the result to the desired type. */
7005 (if (INTEGRAL_TYPE_P (type)
7006 && op != MULT_EXPR
7007 && op != RDIV_EXPR
7008 /* We check for type compatibility between @0 and @1 below,
7009 so there's no need to check that @2/@4 are integral types. */
7010 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
7011 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
7012 /* The precision of the type of each operand must match the
7013 precision of the mode of each operand, similarly for the
7014 result. */
7015 && type_has_mode_precision_p (TREE_TYPE (@1))
7016 && type_has_mode_precision_p (TREE_TYPE (@2))
7017 && type_has_mode_precision_p (type)
7018 /* The inner conversion must be a widening conversion. */
7019 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1))
7020 && types_match (@1, type)
7021 && (types_match (@1, @2)
7022 /* Or the second operand is const integer or converted const
7023 integer from valueize. */
7024 || poly_int_tree_p (@4)))
7025 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
7026 (op @1 (convert @2))
7027 (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); }
7028 (convert (op (convert:utype @1)
7029 (convert:utype @2)))))
7030 (if (FLOAT_TYPE_P (type)
7031 && DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
7032 == DECIMAL_FLOAT_TYPE_P (type))
7033 (with { tree arg0 = strip_float_extensions (@1);
7034 tree arg1 = strip_float_extensions (@2);
7035 tree itype = TREE_TYPE (@0);
7036 tree ty1 = TREE_TYPE (arg0);
7037 tree ty2 = TREE_TYPE (arg1);
7038 enum tree_code code = TREE_CODE (itype); }
7039 (if (FLOAT_TYPE_P (ty1)
7040 && FLOAT_TYPE_P (ty2))
7041 (with { tree newtype = type;
7042 if (TYPE_MODE (ty1) == SDmode
7043 || TYPE_MODE (ty2) == SDmode
7044 || TYPE_MODE (type) == SDmode)
7045 newtype = dfloat32_type_node;
7046 if (TYPE_MODE (ty1) == DDmode
7047 || TYPE_MODE (ty2) == DDmode
7048 || TYPE_MODE (type) == DDmode)
7049 newtype = dfloat64_type_node;
7050 if (TYPE_MODE (ty1) == TDmode
7051 || TYPE_MODE (ty2) == TDmode
7052 || TYPE_MODE (type) == TDmode)
7053 newtype = dfloat128_type_node; }
7054 (if ((newtype == dfloat32_type_node
7055 || newtype == dfloat64_type_node
7056 || newtype == dfloat128_type_node)
7057 && newtype == type
7058 && types_match (newtype, type))
7059 (op (convert:newtype @1) (convert:newtype @2))
7060 (with { if (TYPE_PRECISION (ty1) > TYPE_PRECISION (newtype))
7061 newtype = ty1;
7062 if (TYPE_PRECISION (ty2) > TYPE_PRECISION (newtype))
7063 newtype = ty2; }
7064 /* Sometimes this transformation is safe (cannot
7065 change results through affecting double rounding
7066 cases) and sometimes it is not. If NEWTYPE is
7067 wider than TYPE, e.g. (float)((long double)double
7068 + (long double)double) converted to
7069 (float)(double + double), the transformation is
7070 unsafe regardless of the details of the types
7071 involved; double rounding can arise if the result
7072 of NEWTYPE arithmetic is a NEWTYPE value half way
7073 between two representable TYPE values but the
7074 exact value is sufficiently different (in the
7075 right direction) for this difference to be
7076 visible in ITYPE arithmetic. If NEWTYPE is the
7077 same as TYPE, however, the transformation may be
7078 safe depending on the types involved: it is safe
7079 if the ITYPE has strictly more than twice as many
7080 mantissa bits as TYPE, can represent infinities
7081 and NaNs if the TYPE can, and has sufficient
7082 exponent range for the product or ratio of two
7083 values representable in the TYPE to be within the
7084 range of normal values of ITYPE. */
7085 (if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
7086 && (flag_unsafe_math_optimizations
7087 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
7088 && real_can_shorten_arithmetic (TYPE_MODE (itype),
7089 TYPE_MODE (type))
7090 && !excess_precision_type (newtype)))
7091 && !types_match (itype, newtype))
7092 (convert:type (op (convert:newtype @1)
7093 (convert:newtype @2)))
7094 )))) )
7095 ))
7096 )))
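
/* E.g. the standard C promotion dance below narrows back to a single
   16-bit addition, done in unsigned short to avoid introducing signed
   overflow (sketch; assumes 16-bit short and 32-bit int):

     short f (short a, short b) { return (short) ((int) a + (int) b); }
     // -> (short) ((unsigned short) a + (unsigned short) b)  */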
7097
7098 /* This is another case of narrowing, specifically when there's an outer
7099 BIT_AND_EXPR which masks off bits outside the type of the innermost
7100 operands. Like the previous case we have to convert the operands
7101 to unsigned types to avoid introducing undefined behavior for the
7102 arithmetic operation. */
7103 (for op (minus plus)
7104 (simplify
7105 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
7106 (if (INTEGRAL_TYPE_P (type)
7107 /* We check for type compatibility between @0 and @1 below,
7108 so there's no need to check that @1/@3 are integral types. */
7109 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
7110 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
7111 /* The precision of the type of each operand must match the
7112 precision of the mode of each operand, similarly for the
7113 result. */
7114 && type_has_mode_precision_p (TREE_TYPE (@0))
7115 && type_has_mode_precision_p (TREE_TYPE (@1))
7116 && type_has_mode_precision_p (type)
7117 /* The inner conversion must be a widening conversion. */
7118 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
7119 && types_match (@0, @1)
7120 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
7121 <= TYPE_PRECISION (TREE_TYPE (@0)))
7122 && (wi::to_wide (@4)
7123 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
7124 true, TYPE_PRECISION (type))) == 0)
7125 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
7126 (with { tree ntype = TREE_TYPE (@0); }
7127 (convert (bit_and (op @0 @1) (convert:ntype @4))))
7128 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
7129 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
7130 (convert:utype @4))))))))
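
/* E.g. with 8-bit unsigned char operands the masked sum below is performed
   directly in unsigned char (sketch; the mask must not cover bits above
   the narrow type):

     int f (unsigned char a, unsigned char b) { return (a + b) & 0xff; }
     // -> roughly (int) (unsigned char) (a + b)  */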
7131
7132 /* Transform (@0 < @1 and @0 < @2) to use min,
7133 (@0 > @1 and @0 > @2) to use max. */
7134 (for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
7135 op (lt le gt ge lt le gt ge )
7136 ext (min min max max max max min min )
7137 (simplify
7138 (logic (op:cs @0 @1) (op:cs @0 @2))
7139 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7140 && TREE_CODE (@0) != INTEGER_CST)
7141 (op @0 (ext @1 @2)))))
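
/* E.g. once gimplified to bit_and/bit_ior of the two compares,
   "x < a && x < b" becomes "x < MIN (a, b)" and "x > a || x > b" becomes
   "x > MIN (a, b)" (sketch):

     int f (int x, int a, int b) { return x < a && x < b; }
     // -> x < (a < b ? a : b)  */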
7142
7143 (simplify
7144 /* signbit(x) -> 0 if x is nonnegative. */
7145 (SIGNBIT tree_expr_nonnegative_p@0)
7146 { integer_zero_node; })
7147
7148 (simplify
7149 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
7150 (SIGNBIT @0)
7151 (if (!HONOR_SIGNED_ZEROS (@0))
7152 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
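
/* E.g. signbit (fabs (x)) folds to 0 via the first rule, and with
   -fno-signed-zeros signbit (x) becomes x < 0.0 via the second (sketch):

     #include <math.h>

     int f (double x) { return signbit (fabs (x)); }   // -> 0  */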
7153
7154 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
7155 (for cmp (eq ne)
7156 (for op (plus minus)
7157 rop (minus plus)
7158 (simplify
7159 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
7160 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
7161 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
7162 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
7163 && !TYPE_SATURATING (TREE_TYPE (@0)))
7164 (with { tree res = int_const_binop (rop, @2, @1); }
7165 (if (TREE_OVERFLOW (res)
7166 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
7167 { constant_boolean_node (cmp == NE_EXPR, type); }
7168 (if (single_use (@3))
7169 (cmp @0 { TREE_OVERFLOW (res)
7170 ? drop_tree_overflow (res) : res; }))))))))
7171 (for cmp (lt le gt ge)
7172 (for op (plus minus)
7173 rop (minus plus)
7174 (simplify
7175 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
7176 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
7177 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
7178 (with { tree res = int_const_binop (rop, @2, @1); }
7179 (if (TREE_OVERFLOW (res))
7180 {
7181 fold_overflow_warning (("assuming signed overflow does not occur "
7182 "when simplifying conditional to constant"),
7183 WARN_STRICT_OVERFLOW_CONDITIONAL);
7184 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
7185 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
7186 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
7187 TYPE_SIGN (TREE_TYPE (@1)))
7188 != (op == MINUS_EXPR);
7189 constant_boolean_node (less == ovf_high, type);
7190 }
7191 (if (single_use (@3))
7192 (with
7193 {
7194 fold_overflow_warning (("assuming signed overflow does not occur "
7195 "when changing X +- C1 cmp C2 to "
7196 "X cmp C2 -+ C1"),
7197 WARN_STRICT_OVERFLOW_COMPARISON);
7198 }
7199 (cmp @0 { res; })))))))))
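
/* E.g. "x + 5 == 7" becomes "x == 2" unconditionally, while for signed x
   with undefined overflow "x + 5 < 7" becomes "x < 2", possibly with a
   -Wstrict-overflow note (sketch):

     int f (int x) { return x + 5 == 7; }   // -> x == 2
     int g (int x) { return x + 5 <  7; }   // -> x < 2  */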
7200
7201 /* Canonicalizations of BIT_FIELD_REFs. */
7202
7203 (simplify
7204 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
7205 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
7206
7207 (simplify
7208 (BIT_FIELD_REF (view_convert @0) @1 @2)
7209 (BIT_FIELD_REF @0 @1 @2))
7210
7211 (simplify
7212 (BIT_FIELD_REF @0 @1 integer_zerop)
7213 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
7214 (view_convert @0)))
7215
7216 (simplify
7217 (BIT_FIELD_REF @0 @1 @2)
7218 (switch
7219 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
7220 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
7221 (switch
7222 (if (integer_zerop (@2))
7223 (view_convert (realpart @0)))
7224 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
7225 (view_convert (imagpart @0)))))
7226 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7227 && INTEGRAL_TYPE_P (type)
7228 /* On GIMPLE this should only apply to register arguments. */
7229 && (! GIMPLE || is_gimple_reg (@0))
7230 /* A bit-field-ref that referenced the full argument can be stripped. */
7231 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
7232 && integer_zerop (@2))
7233 /* Low-parts can be reduced to integral conversions.
7234 ??? The following doesn't work for PDP endian. */
7235 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
7236 /* But only do this after vectorization. */
7237 && canonicalize_math_after_vectorization_p ()
7238 /* Don't even think about BITS_BIG_ENDIAN. */
7239 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
7240 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
7241 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
7242 ? (TYPE_PRECISION (TREE_TYPE (@0))
7243 - TYPE_PRECISION (type))
7244 : 0)) == 0)))
7245 (convert @0))))
7246
7247 /* Simplify vector extracts. */
7248
7249 (simplify
7250 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
7251 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
7252 && tree_fits_uhwi_p (TYPE_SIZE (type))
7253 && ((tree_to_uhwi (TYPE_SIZE (type))
7254 == tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
7255 || (VECTOR_TYPE_P (type)
7256 && (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)))
7257 == tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0))))))))
7258 (with
7259 {
7260 tree ctor = (TREE_CODE (@0) == SSA_NAME
7261 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
7262 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
7263 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
7264 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
7265 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
7266 }
7267 (if (n != 0
7268 && (idx % width) == 0
7269 && (n % width) == 0
7270 && known_le ((idx + n) / width,
7271 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
7272 (with
7273 {
7274 idx = idx / width;
7275 n = n / width;
7276 /* Constructor elements can be subvectors. */
7277 poly_uint64 k = 1;
7278 if (CONSTRUCTOR_NELTS (ctor) != 0)
7279 {
7280 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
7281 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
7282 k = TYPE_VECTOR_SUBPARTS (cons_elem);
7283 }
7284 unsigned HOST_WIDE_INT elt, count, const_k;
7285 }
7286 (switch
7287 /* We keep an exact subset of the constructor elements. */
7288 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
7289 (if (CONSTRUCTOR_NELTS (ctor) == 0)
7290 { build_zero_cst (type); }
7291 (if (count == 1)
7292 (if (elt < CONSTRUCTOR_NELTS (ctor))
7293 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
7294 { build_zero_cst (type); })
7295 /* We don't want to emit new CTORs unless the old one goes away.
7296 ??? Eventually allow this if the CTOR ends up constant or
7297 uniform. */
7298 (if (single_use (@0))
7299 (with
7300 {
7301 vec<constructor_elt, va_gc> *vals;
7302 vec_alloc (vals, count);
7303 bool constant_p = true;
7304 tree res;
7305 for (unsigned i = 0;
7306 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
7307 {
7308 tree e = CONSTRUCTOR_ELT (ctor, elt + i)->value;
7309 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE, e);
7310 if (!CONSTANT_CLASS_P (e))
7311 constant_p = false;
7312 }
7313 tree evtype = (types_match (TREE_TYPE (type),
7314 TREE_TYPE (TREE_TYPE (ctor)))
7315 ? type
7316 : build_vector_type (TREE_TYPE (TREE_TYPE (ctor)),
7317 count * k));
7318 res = (constant_p ? build_vector_from_ctor (evtype, vals)
7319 : build_constructor (evtype, vals));
7320 }
7321 (view_convert { res; }))))))
7322 /* The bitfield references a single constructor element. */
7323 (if (k.is_constant (&const_k)
7324 && idx + n <= (idx / const_k + 1) * const_k)
7325 (switch
7326 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
7327 { build_zero_cst (type); })
7328 (if (n == const_k)
7329 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
7330 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
7331 @1 { bitsize_int ((idx % const_k) * width); })))))))))
7332
7333 /* Simplify a bit extraction from a bit insertion for the cases with
7334 the inserted element fully covering the extraction or the insertion
7335 not touching the extraction. */
7336 (simplify
7337 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
7338 (with
7339 {
7340 unsigned HOST_WIDE_INT isize;
7341 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
7342 isize = TYPE_PRECISION (TREE_TYPE (@1));
7343 else
7344 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
7345 }
7346 (switch
7347 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
7348 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
7349 wi::to_wide (@ipos) + isize))
7350 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
7351 wi::to_wide (@rpos)
7352 - wi::to_wide (@ipos)); }))
7353 (if (wi::geu_p (wi::to_wide (@ipos),
7354 wi::to_wide (@rpos) + wi::to_wide (@rsize))
7355 || wi::geu_p (wi::to_wide (@rpos),
7356 wi::to_wide (@ipos) + isize))
7357 (BIT_FIELD_REF @0 @rsize @rpos)))))
7358
7359 (if (canonicalize_math_after_vectorization_p ())
7360 (for fmas (FMA)
7361 (simplify
7362 (fmas:c (negate @0) @1 @2)
7363 (IFN_FNMA @0 @1 @2))
7364 (simplify
7365 (fmas @0 @1 (negate @2))
7366 (IFN_FMS @0 @1 @2))
7367 (simplify
7368 (fmas:c (negate @0) @1 (negate @2))
7369 (IFN_FNMS @0 @1 @2))
7370 (simplify
7371 (negate (fmas@3 @0 @1 @2))
7372 (if (single_use (@3))
7373 (IFN_FNMS @0 @1 @2))))
7374
7375 (simplify
7376 (IFN_FMS:c (negate @0) @1 @2)
7377 (IFN_FNMS @0 @1 @2))
7378 (simplify
7379 (IFN_FMS @0 @1 (negate @2))
7380 (IFN_FMA @0 @1 @2))
7381 (simplify
7382 (IFN_FMS:c (negate @0) @1 (negate @2))
7383 (IFN_FNMA @0 @1 @2))
7384 (simplify
7385 (negate (IFN_FMS@3 @0 @1 @2))
7386 (if (single_use (@3))
7387 (IFN_FNMA @0 @1 @2)))
7388
7389 (simplify
7390 (IFN_FNMA:c (negate @0) @1 @2)
7391 (IFN_FMA @0 @1 @2))
7392 (simplify
7393 (IFN_FNMA @0 @1 (negate @2))
7394 (IFN_FNMS @0 @1 @2))
7395 (simplify
7396 (IFN_FNMA:c (negate @0) @1 (negate @2))
7397 (IFN_FMS @0 @1 @2))
7398 (simplify
7399 (negate (IFN_FNMA@3 @0 @1 @2))
7400 (if (single_use (@3))
7401 (IFN_FMS @0 @1 @2)))
7402
7403 (simplify
7404 (IFN_FNMS:c (negate @0) @1 @2)
7405 (IFN_FMS @0 @1 @2))
7406 (simplify
7407 (IFN_FNMS @0 @1 (negate @2))
7408 (IFN_FNMA @0 @1 @2))
7409 (simplify
7410 (IFN_FNMS:c (negate @0) @1 (negate @2))
7411 (IFN_FMA @0 @1 @2))
7412 (simplify
7413 (negate (IFN_FNMS@3 @0 @1 @2))
7414 (if (single_use (@3))
7415 (IFN_FMA @0 @1 @2))))
7416
7417 /* CLZ simplifications. */
7418 (for clz (CLZ)
7419 (for op (eq ne)
7420 cmp (lt ge)
7421 (simplify
7422 (op (clz:s@2 @0) INTEGER_CST@1)
7423 (if (integer_zerop (@1) && single_use (@2))
7424 /* clz(X) == 0 is (int)X < 0 and clz(X) != 0 is (int)X >= 0. */
7425 (with { tree type0 = TREE_TYPE (@0);
7426 tree stype = signed_type_for (type0);
7427 HOST_WIDE_INT val = 0;
7428 /* Punt on hypothetical weird targets. */
7429 if (clz == CFN_CLZ
7430 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_TYPE_MODE (type0),
7431 val) == 2
7432 && val == 0)
7433 stype = NULL_TREE;
7434 }
7435 (if (stype)
7436 (cmp (convert:stype @0) { build_zero_cst (stype); })))
7437 /* clz(X) == (prec-1) is X == 1 and clz(X) != (prec-1) is X != 1. */
7438 (with { bool ok = true;
7439 HOST_WIDE_INT val = 0;
7440 tree type0 = TREE_TYPE (@0);
7441 /* Punt on hypothetical weird targets. */
7442 if (clz == CFN_CLZ
7443 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_TYPE_MODE (type0),
7444 val) == 2
7445 && val == TYPE_PRECISION (type0) - 1)
7446 ok = false;
7447 }
7448 (if (ok && wi::to_wide (@1) == (TYPE_PRECISION (type0) - 1))
7449 (op @0 { build_one_cst (type0); })))))))
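
/* E.g. with 32-bit int, "__builtin_clz (x) == 0" tests the sign bit of
   (int) x and "__builtin_clz (x) == 31" tests x == 1 (sketch; at the
   source level the argument must be nonzero, and the target's CLZ-at-zero
   value is checked above):

     int f1 (unsigned x) { return __builtin_clz (x) == 0;  }   // -> (int) x < 0
     int f2 (unsigned x) { return __builtin_clz (x) == 31; }   // -> x == 1  */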
7450
7451 /* CTZ simplifications. */
7452 (for ctz (CTZ)
7453 (for op (ge gt le lt)
7454 cmp (eq eq ne ne)
7455 (simplify
7456 /* __builtin_ctz (x) >= C -> (x & ((1 << C) - 1)) == 0. */
7457 (op (ctz:s @0) INTEGER_CST@1)
7458 (with { bool ok = true;
7459 HOST_WIDE_INT val = 0;
7460 if (!tree_fits_shwi_p (@1))
7461 ok = false;
7462 else
7463 {
7464 val = tree_to_shwi (@1);
7465 /* Canonicalize to >= or <. */
7466 if (op == GT_EXPR || op == LE_EXPR)
7467 {
7468 if (val == HOST_WIDE_INT_MAX)
7469 ok = false;
7470 else
7471 val++;
7472 }
7473 }
7474 bool zero_res = false;
7475 HOST_WIDE_INT zero_val = 0;
7476 tree type0 = TREE_TYPE (@0);
7477 int prec = TYPE_PRECISION (type0);
7478 if (ctz == CFN_CTZ
7479 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_TYPE_MODE (type0),
7480 zero_val) == 2)
7481 zero_res = true;
7482 }
7483 (if (val <= 0)
7484 (if (ok && (!zero_res || zero_val >= val))
7485 { constant_boolean_node (cmp == EQ_EXPR ? true : false, type); })
7486 (if (val >= prec)
7487 (if (ok && (!zero_res || zero_val < val))
7488 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); })
7489 (if (ok && (!zero_res || zero_val < 0 || zero_val >= prec))
7490 (cmp (bit_and @0 { wide_int_to_tree (type0,
7491 wi::mask (val, false, prec)); })
7492 { build_zero_cst (type0); })))))))
7493 (for op (eq ne)
7494 (simplify
7495 /* __builtin_ctz (x) == C -> (x & ((1 << (C + 1)) - 1)) == (1 << C). */
7496 (op (ctz:s @0) INTEGER_CST@1)
7497 (with { bool zero_res = false;
7498 HOST_WIDE_INT zero_val = 0;
7499 tree type0 = TREE_TYPE (@0);
7500 int prec = TYPE_PRECISION (type0);
7501 if (ctz == CFN_CTZ
7502 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_TYPE_MODE (type0),
7503 zero_val) == 2)
7504 zero_res = true;
7505 }
7506 (if (tree_int_cst_sgn (@1) < 0 || wi::to_widest (@1) >= prec)
7507 (if (!zero_res || zero_val != wi::to_widest (@1))
7508 { constant_boolean_node (op == EQ_EXPR ? false : true, type); })
7509 (if (!zero_res || zero_val < 0 || zero_val >= prec)
7510 (op (bit_and @0 { wide_int_to_tree (type0,
7511 wi::mask (tree_to_uhwi (@1) + 1,
7512 false, prec)); })
7513 { wide_int_to_tree (type0,
7514 wi::shifted_mask (tree_to_uhwi (@1), 1,
7515 false, prec)); })))))))
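
/* E.g. "__builtin_ctz (x) == 3" becomes "(x & 15) == 8": bits 0-2 must be
   clear and bit 3 set (sketch; zero-argument caveats as above):

     int f (unsigned x) { return __builtin_ctz (x) == 3; }   // -> (x & 15u) == 8u  */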
7516
7517 /* POPCOUNT simplifications. */
7518 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
7519 (simplify
7520 (plus (POPCOUNT:s @0) (POPCOUNT:s @1))
7521 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
7522 (POPCOUNT (bit_ior @0 @1))))
7523
7524 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
7525 (for popcount (POPCOUNT)
7526 (for cmp (le eq ne gt)
7527 rep (eq eq ne ne)
7528 (simplify
7529 (cmp (popcount @0) integer_zerop)
7530 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
7531
7532 /* Canonicalize POPCOUNT(x)&1 as PARITY(X). */
7533 (simplify
7534 (bit_and (POPCOUNT @0) integer_onep)
7535 (PARITY @0))
7536
7537 /* PARITY simplifications. */
7538 /* parity(~X) is parity(X). */
7539 (simplify
7540 (PARITY (bit_not @0))
7541 (PARITY @0))
7542
7543 /* parity(X)^parity(Y) is parity(X^Y). */
7544 (simplify
7545 (bit_xor (PARITY:s @0) (PARITY:s @1))
7546 (PARITY (bit_xor @0 @1)))
7547
7548 /* Common POPCOUNT/PARITY simplifications. */
7549 /* popcount(X&C1) is (X>>C2)&1 when C1 == 1<<C2. Same for parity(X&C1). */
7550 (for pfun (POPCOUNT PARITY)
7551 (simplify
7552 (pfun @0)
7553 (with { wide_int nz = tree_nonzero_bits (@0); }
7554 (switch
7555 (if (nz == 1)
7556 (convert @0))
7557 (if (wi::popcount (nz) == 1)
7558 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
7559 (convert (rshift:utype (convert:utype @0)
7560 { build_int_cst (integer_type_node,
7561 wi::ctz (nz)); }))))))))
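
/* E.g. when only one bit of the argument can be nonzero both functions
   reduce to a shift (sketch):

     int f (unsigned x) { return __builtin_popcount (x & 8u); }   // -> (x & 8u) >> 3
     int g (unsigned x) { return __builtin_parity (x & 8u); }     // -> (x & 8u) >> 3  */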
7562
7563 #if GIMPLE
7564 /* 64- and 32-bit branchless implementations of popcount are detected:
7565
7566 int popcount64c (uint64_t x)
7567 {
7568 x -= (x >> 1) & 0x5555555555555555ULL;
7569 x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
7570 x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
7571 return (x * 0x0101010101010101ULL) >> 56;
7572 }
7573
7574 int popcount32c (uint32_t x)
7575 {
7576 x -= (x >> 1) & 0x55555555;
7577 x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
7578 x = (x + (x >> 4)) & 0x0f0f0f0f;
7579 return (x * 0x01010101) >> 24;
7580 } */
7581 (simplify
7582 (rshift
7583 (mult
7584 (bit_and
7585 (plus:c
7586 (rshift @8 INTEGER_CST@5)
7587 (plus:c@8
7588 (bit_and @6 INTEGER_CST@7)
7589 (bit_and
7590 (rshift
7591 (minus@6 @0
7592 (bit_and (rshift @0 INTEGER_CST@4) INTEGER_CST@11))
7593 INTEGER_CST@10)
7594 INTEGER_CST@9)))
7595 INTEGER_CST@3)
7596 INTEGER_CST@2)
7597 INTEGER_CST@1)
7598 /* Check constants and optab. */
7599 (with { unsigned prec = TYPE_PRECISION (type);
7600 int shift = (64 - prec) & 63;
7601 unsigned HOST_WIDE_INT c1
7602 = HOST_WIDE_INT_UC (0x0101010101010101) >> shift;
7603 unsigned HOST_WIDE_INT c2
7604 = HOST_WIDE_INT_UC (0x0F0F0F0F0F0F0F0F) >> shift;
7605 unsigned HOST_WIDE_INT c3
7606 = HOST_WIDE_INT_UC (0x3333333333333333) >> shift;
7607 unsigned HOST_WIDE_INT c4
7608 = HOST_WIDE_INT_UC (0x5555555555555555) >> shift;
7609 }
7610 (if (prec >= 16
7611 && prec <= 64
7612 && pow2p_hwi (prec)
7613 && TYPE_UNSIGNED (type)
7614 && integer_onep (@4)
7615 && wi::to_widest (@10) == 2
7616 && wi::to_widest (@5) == 4
7617 && wi::to_widest (@1) == prec - 8
7618 && tree_to_uhwi (@2) == c1
7619 && tree_to_uhwi (@3) == c2
7620 && tree_to_uhwi (@9) == c3
7621 && tree_to_uhwi (@7) == c3
7622 && tree_to_uhwi (@11) == c4)
7623 (if (direct_internal_fn_supported_p (IFN_POPCOUNT, type,
7624 OPTIMIZE_FOR_BOTH))
7625 (convert (IFN_POPCOUNT:type @0))
7626 /* Try to do popcount in two halves. PREC must be at least
7627 five bits for this to work without extension before adding. */
7628 (with {
7629 tree half_type = NULL_TREE;
7630 opt_machine_mode m = mode_for_size ((prec + 1) / 2, MODE_INT, 1);
7631 int half_prec = 8;
7632 if (m.exists ()
7633 && m.require () != TYPE_MODE (type))
7634 {
7635 half_prec = GET_MODE_PRECISION (as_a <scalar_int_mode> (m));
7636 half_type = build_nonstandard_integer_type (half_prec, 1);
7637 }
7638 gcc_assert (half_prec > 2);
7639 }
7640 (if (half_type != NULL_TREE
7641 && direct_internal_fn_supported_p (IFN_POPCOUNT, half_type,
7642 OPTIMIZE_FOR_BOTH))
7643 (convert (plus
7644 (IFN_POPCOUNT:half_type (convert @0))
7645 (IFN_POPCOUNT:half_type (convert (rshift @0
7646 { build_int_cst (integer_type_node, half_prec); } )))))))))))
7647
7648 /* __builtin_ffs needs to deal with the possible zero argument on many
7649 targets. If we know the argument is always non-zero, __builtin_ctz + 1
7650 should lead to better code. */
7651 (simplify
7652 (FFS tree_expr_nonzero_p@0)
7653 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
7654 && direct_internal_fn_supported_p (IFN_CTZ, TREE_TYPE (@0),
7655 OPTIMIZE_FOR_SPEED))
7656 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
7657 (plus (CTZ:type (convert:utype @0)) { build_one_cst (type); }))))
7658 #endif
7659
7660 (for ffs (BUILT_IN_FFS BUILT_IN_FFSL BUILT_IN_FFSLL
7661 BUILT_IN_FFSIMAX)
7662 /* __builtin_ffs (X) == 0 -> X == 0.
7663 __builtin_ffs (X) == 6 -> (X & 63) == 32. */
7664 (for cmp (eq ne)
7665 (simplify
7666 (cmp (ffs@2 @0) INTEGER_CST@1)
7667 (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
7668 (switch
7669 (if (integer_zerop (@1))
7670 (cmp @0 { build_zero_cst (TREE_TYPE (@0)); }))
7671 (if (tree_int_cst_sgn (@1) < 0 || wi::to_widest (@1) > prec)
7672 { constant_boolean_node (cmp == NE_EXPR ? true : false, type); })
7673 (if (single_use (@2))
7674 (cmp (bit_and @0 { wide_int_to_tree (TREE_TYPE (@0),
7675 wi::mask (tree_to_uhwi (@1),
7676 false, prec)); })
7677 { wide_int_to_tree (TREE_TYPE (@0),
7678 wi::shifted_mask (tree_to_uhwi (@1) - 1, 1,
7679 false, prec)); }))))))
7680
7681 /* __builtin_ffs (X) > 6 -> X != 0 && (X & 63) == 0. */
7682 (for cmp (gt le)
7683 cmp2 (ne eq)
7684 cmp3 (eq ne)
7685 bit_op (bit_and bit_ior)
7686 (simplify
7687 (cmp (ffs@2 @0) INTEGER_CST@1)
7688 (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
7689 (switch
7690 (if (integer_zerop (@1))
7691 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))
7692 (if (tree_int_cst_sgn (@1) < 0)
7693 { constant_boolean_node (cmp == GT_EXPR ? true : false, type); })
7694 (if (wi::to_widest (@1) >= prec)
7695 { constant_boolean_node (cmp == GT_EXPR ? false : true, type); })
7696 (if (wi::to_widest (@1) == prec - 1)
7697 (cmp3 @0 { wide_int_to_tree (TREE_TYPE (@0),
7698 wi::shifted_mask (prec - 1, 1,
7699 false, prec)); }))
7700 (if (single_use (@2))
7701 (bit_op (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); })
7702 (cmp3 (bit_and @0
7703 { wide_int_to_tree (TREE_TYPE (@0),
7704 wi::mask (tree_to_uhwi (@1),
7705 false, prec)); })
7706 { build_zero_cst (TREE_TYPE (@0)); }))))))))
7707
7708 #if GIMPLE
7709
7710 /* Simplify:
7711 a = op a1
7712 r = cond ? a : b
7713 --> r = .COND_FN (cond, a, b)
7714 and,
7715 a = op a1
7716 r = cond ? b : a
7717 --> r = .COND_FN (~cond, b, a). */
7718
7719 (for uncond_op (UNCOND_UNARY)
7720 cond_op (COND_UNARY)
7721 (simplify
7722 (vec_cond @0 (view_convert? (uncond_op@3 @1)) @2)
7723 (with { tree op_type = TREE_TYPE (@3); }
7724 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
7725 && is_truth_type_for (op_type, TREE_TYPE (@0)))
7726 (cond_op @0 @1 @2))))
7727 (simplify
7728 (vec_cond @0 @1 (view_convert? (uncond_op@3 @2)))
7729 (with { tree op_type = TREE_TYPE (@3); }
7730 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
7731 && is_truth_type_for (op_type, TREE_TYPE (@0)))
7732 (cond_op (bit_not @0) @2 @1)))))
7733
7734 /* Simplify:
7735
7736 a = a1 op a2
7737 r = c ? a : b;
7738
7739 to:
7740
7741 r = c ? a1 op a2 : b;
7742
7743 if the target can do it in one go. This makes the operation conditional
7744 on c, so could drop potentially-trapping arithmetic, but that's a valid
7745 simplification if the result of the operation isn't needed.
7746
7747 Avoid speculatively generating a stand-alone vector comparison
7748 on targets that might not support them. Any target implementing
7749 conditional internal functions must support the same comparisons
7750 inside and outside a VEC_COND_EXPR. */
7751
7752 (for uncond_op (UNCOND_BINARY)
7753 cond_op (COND_BINARY)
7754 (simplify
7755 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
7756 (with { tree op_type = TREE_TYPE (@4); }
7757 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
7758 && is_truth_type_for (op_type, TREE_TYPE (@0))
7759 && single_use (@4))
7760 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
7761 (simplify
7762 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
7763 (with { tree op_type = TREE_TYPE (@4); }
7764 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
7765 && is_truth_type_for (op_type, TREE_TYPE (@0))
7766 && single_use (@4))
7767 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
7768
7769 /* Same for ternary operations. */
7770 (for uncond_op (UNCOND_TERNARY)
7771 cond_op (COND_TERNARY)
7772 (simplify
7773 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
7774 (with { tree op_type = TREE_TYPE (@5); }
7775 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
7776 && is_truth_type_for (op_type, TREE_TYPE (@0))
7777 && single_use (@5))
7778 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
7779 (simplify
7780 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
7781 (with { tree op_type = TREE_TYPE (@5); }
7782 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
7783 && is_truth_type_for (op_type, TREE_TYPE (@0))
7784 && single_use (@5))
7785 (view_convert (cond_op (bit_not @0) @2 @3 @4
7786 (view_convert:op_type @1)))))))
7787 #endif
7788
7789 /* Detect cases in which a VEC_COND_EXPR effectively replaces the
7790 "else" value of an IFN_COND_*. */
7791 (for cond_op (COND_BINARY)
7792 (simplify
7793 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
7794 (with { tree op_type = TREE_TYPE (@3); }
7795 (if (element_precision (type) == element_precision (op_type))
7796 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
7797 (simplify
7798 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
7799 (with { tree op_type = TREE_TYPE (@5); }
7800 (if (inverse_conditions_p (@0, @2)
7801 && element_precision (type) == element_precision (op_type))
7802 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
7803
7804 /* Same for ternary operations. */
7805 (for cond_op (COND_TERNARY)
7806 (simplify
7807 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
7808 (with { tree op_type = TREE_TYPE (@4); }
7809 (if (element_precision (type) == element_precision (op_type))
7810 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
7811 (simplify
7812 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
7813 (with { tree op_type = TREE_TYPE (@6); }
7814 (if (inverse_conditions_p (@0, @2)
7815 && element_precision (type) == element_precision (op_type))
7816 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
7817
7818 /* Detect a simplification for a conditional reduction where
7819
7820 a = mask1 ? b : 0
7821 c = mask2 ? d + a : d
7822
7823 is turned into
7824
7825 c = mask1 && mask2 ? d + b : d. */
7826 (simplify
7827 (IFN_COND_ADD @0 @1 (vec_cond @2 @3 integer_zerop) @1)
7828 (IFN_COND_ADD (bit_and @0 @2) @1 @3 @1))
7829
7830 /* For pointers @0 and @2 and nonnegative constant offset @1, look for
7831 expressions like:
7832
7833 A: (@0 + @1 < @2) | (@2 + @1 < @0)
7834 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
7835
7836 If pointers are known not to wrap, B checks whether @1 bytes starting
7837 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
7838 bytes. A is more efficiently tested as:
7839
7840 A: (sizetype) (@0 + @1 - @2) > @1 * 2
7841
7842 The equivalent expression for B is given by replacing @1 with @1 - 1:
7843
7844 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
7845
7846 @0 and @2 can be swapped in both expressions without changing the result.
7847
7848 The folds rely on sizetype's being unsigned (which is always true)
7849 and on its being the same width as the pointer (which we have to check).
7850
7851 The fold replaces two pointer_plus expressions, two comparisons and
7852 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
7853 the best case it's a saving of two operations. The A fold retains one
7854 of the original pointer_pluses, so is a win even if both pointer_pluses
7855 are used elsewhere. The B fold is a wash if both pointer_pluses are
7856 used elsewhere, since all we end up doing is replacing a comparison with
7857 a pointer_plus. We do still apply the fold under those circumstances
7858 though, in case applying it to other conditions eventually makes one of the
7859 pointer_pluses dead. */
7860 (for ior (truth_orif truth_or bit_ior)
7861 (for cmp (le lt)
7862 (simplify
7863 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
7864 (cmp:cs (pointer_plus@4 @2 @1) @0))
7865 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
7866 && TYPE_OVERFLOW_WRAPS (sizetype)
7867 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
7868 /* Calculate the rhs constant. */
7869 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
7870 offset_int rhs = off * 2; }
7871 /* Always fails for negative values. */
7872 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
7873 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
7874 pick a canonical order. This increases the chances of using the
7875 same pointer_plus in multiple checks. */
7876 (with { bool swap_p = tree_swap_operands_p (@0, @2);
7877 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
7878 (if (cmp == LT_EXPR)
7879 (gt (convert:sizetype
7880 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
7881 { swap_p ? @0 : @2; }))
7882 { rhs_tree; })
7883 (gt (convert:sizetype
7884 (pointer_diff:ssizetype
7885 (pointer_plus { swap_p ? @2 : @0; }
7886 { wide_int_to_tree (sizetype, off); })
7887 { swap_p ? @0 : @2; }))
7888 { rhs_tree; })))))))))
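/* Worked example of the A form, with hypothetical pointers p and q and a
   constant n (and sizetype as wide as the pointers):

     (p + n < q) || (q + n < p)   -->   (sizetype) (p + n - q) > n * 2

   If p + n < q the subtraction wraps to a large unsigned value greater
   than n * 2; if q + n < p then p - q > n and hence p + n - q > n * 2;
   otherwise -n <= p - q <= n, so the converted difference stays within
   [0, n * 2].  */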
7889
7890 /* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
7891 element of @1. */
7892 (for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
7893 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
7894 (with { int i = single_nonzero_element (@1); }
7895 (if (i >= 0)
7896 (with { tree elt = vector_cst_elt (@1, i);
7897 tree elt_type = TREE_TYPE (elt);
7898 unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
7899 tree size = bitsize_int (elt_bits);
7900 tree pos = bitsize_int (elt_bits * i); }
7901 (view_convert
7902 (bit_and:elt_type
7903 (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
7904 { elt; })))))))
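/* Illustration: with @1 = { 0, 0, 0x40, 0 }, every lane of the AND other
   than lane 2 is zero, so e.g.

     IFN_REDUC_PLUS (v & { 0, 0, 0x40, 0 })   -->   v[2] & 0x40.  */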
7905
7906 /* Fold reduction of a single nonzero element constructor. */
7907 (for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
7908 (simplify (reduc (CONSTRUCTOR@0))
7909 (with { tree ctor = (TREE_CODE (@0) == SSA_NAME
7910 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
7911 tree elt = ctor_single_nonzero_element (ctor); }
7912 (if (elt
7913 && !HONOR_SNANS (type)
7914 && !HONOR_SIGNED_ZEROS (type))
7915 { elt; }))))
7916
7917 /* Fold REDUC (@0 op VECTOR_CST) as REDUC (@0) op REDUC (VECTOR_CST). */
7918 (for reduc (IFN_REDUC_PLUS IFN_REDUC_MAX IFN_REDUC_MIN IFN_REDUC_FMAX
7919 IFN_REDUC_FMIN IFN_REDUC_AND IFN_REDUC_IOR IFN_REDUC_XOR)
7920 op (plus max min IFN_FMAX IFN_FMIN bit_and bit_ior bit_xor)
7921 (simplify (reduc (op @0 VECTOR_CST@1))
7922 (op (reduc:type @0) (reduc:type @1))))
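/* E.g. IFN_REDUC_PLUS (v + { 1, 2, 3, 4 }) --> IFN_REDUC_PLUS (v) + 10,
   since the reduction of the constant vector folds to a constant
   (and likewise for the min/max and bitwise reductions).  */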
7923
7924 (simplify
7925 (vec_perm @0 @1 VECTOR_CST@2)
7926 (with
7927 {
7928 tree op0 = @0, op1 = @1, op2 = @2;
7929 machine_mode result_mode = TYPE_MODE (type);
7930 machine_mode op_mode = TYPE_MODE (TREE_TYPE (op0));
7931
7932 /* Build a vector of integers from the tree mask. */
7933 vec_perm_builder builder;
7934 }
7935 (if (tree_to_vec_perm_builder (&builder, op2))
7936 (with
7937 {
7938 /* Create a vec_perm_indices for the integer vector. */
7939 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
7940 bool single_arg = (op0 == op1);
7941 vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
7942 }
7943 (if (sel.series_p (0, 1, 0, 1))
7944 { op0; }
7945 (if (sel.series_p (0, 1, nelts, 1))
7946 { op1; }
7947 (with
7948 {
7949 if (!single_arg)
7950 {
7951 if (sel.all_from_input_p (0))
7952 op1 = op0;
7953 else if (sel.all_from_input_p (1))
7954 {
7955 op0 = op1;
7956 sel.rotate_inputs (1);
7957 }
7958 else if (known_ge (poly_uint64 (sel[0]), nelts))
7959 {
7960 std::swap (op0, op1);
7961 sel.rotate_inputs (1);
7962 }
7963 }
7964 gassign *def;
7965 tree cop0 = op0, cop1 = op1;
7966 if (TREE_CODE (op0) == SSA_NAME
7967 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
7968 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
7969 cop0 = gimple_assign_rhs1 (def);
7970 if (TREE_CODE (op1) == SSA_NAME
7971 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
7972 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
7973 cop1 = gimple_assign_rhs1 (def);
7974 tree t;
7975 }
7976 (if ((TREE_CODE (cop0) == VECTOR_CST
7977 || TREE_CODE (cop0) == CONSTRUCTOR)
7978 && (TREE_CODE (cop1) == VECTOR_CST
7979 || TREE_CODE (cop1) == CONSTRUCTOR)
7980 && (t = fold_vec_perm (type, cop0, cop1, sel)))
7981 { t; }
7982 (with
7983 {
7984 bool changed = (op0 == op1 && !single_arg);
7985 tree ins = NULL_TREE;
7986 unsigned at = 0;
7987
7988 /* See if the permutation is performing a single element
7989 insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
7990 in that case. But only if the vector mode is supported,
7991 otherwise this is invalid GIMPLE. */
7992 if (op_mode != BLKmode
7993 && (TREE_CODE (cop0) == VECTOR_CST
7994 || TREE_CODE (cop0) == CONSTRUCTOR
7995 || TREE_CODE (cop1) == VECTOR_CST
7996 || TREE_CODE (cop1) == CONSTRUCTOR))
7997 {
7998 bool insert_first_p = sel.series_p (1, 1, nelts + 1, 1);
7999 if (insert_first_p)
8000 {
8001 /* After canonicalizing the first elt to come from the
8002 first vector, we can only insert the first elt from
8003 the first vector. */
8004 at = 0;
8005 if ((ins = fold_read_from_vector (cop0, sel[0])))
8006 op0 = op1;
8007 }
8008 /* The above can fail for two-element vectors which always
8009 appear to insert the first element, so try inserting
8010 into the second lane as well. For more than two
8011 elements that's wasted time. */
8012 if (!insert_first_p || (!ins && maybe_eq (nelts, 2u)))
8013 {
8014 unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
8015 for (at = 0; at < encoded_nelts; ++at)
8016 if (maybe_ne (sel[at], at))
8017 break;
8018 if (at < encoded_nelts
8019 && (known_eq (at + 1, nelts)
8020 || sel.series_p (at + 1, 1, at + 1, 1)))
8021 {
8022 if (known_lt (poly_uint64 (sel[at]), nelts))
8023 ins = fold_read_from_vector (cop0, sel[at]);
8024 else
8025 ins = fold_read_from_vector (cop1, sel[at] - nelts);
8026 }
8027 }
8028 }
8029
8030 /* Generate a canonical form of the selector. */
8031 if (!ins && sel.encoding () != builder)
8032 {
8033 /* Some targets are deficient and fail to expand a single
8034 argument permutation while still allowing an equivalent
8035 2-argument version. */
8036 tree oldop2 = op2;
8037 if (sel.ninputs () == 2
8038 || can_vec_perm_const_p (result_mode, op_mode, sel, false))
8039 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
8040 else
8041 {
8042 vec_perm_indices sel2 (builder, 2, nelts);
8043 if (can_vec_perm_const_p (result_mode, op_mode, sel2, false))
8044 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
8045 else
8046 /* Not directly supported with either encoding,
8047 so use the preferred form. */
8048 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
8049 }
8050 if (!operand_equal_p (op2, oldop2, 0))
8051 changed = true;
8052 }
8053 }
8054 (if (ins)
8055 (bit_insert { op0; } { ins; }
8056 { bitsize_int (at * vector_element_bits (type)); })
8057 (if (changed)
8058 (vec_perm { op0; } { op1; } { op2; }))))))))))))
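/* Some illustrative outcomes of the fold above for 4-element vectors
   (hypothetical operands a, b and constant c):

     VEC_PERM_EXPR <a, b, { 0, 1, 2, 3 }>   -->   a
     VEC_PERM_EXPR <a, b, { 4, 5, 6, 7 }>   -->   b
     VEC_PERM_EXPR <a, { c, c, c, c }, { 0, 1, 4, 3 }>
       -->   BIT_INSERT_EXPR of c into lane 2 of a
             (only when the vector mode is supported).  */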
8059
8060 /* VEC_PERM_EXPR (v, v, mask) -> v where v contains same element. */
8061
8062 (match vec_same_elem_p
8063 (vec_duplicate @0))
8064
8065 (match vec_same_elem_p
8066 CONSTRUCTOR@0
8067 (if (TREE_CODE (@0) == SSA_NAME
8068 && uniform_vector_p (gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0))))))
8069
8070 (match vec_same_elem_p
8071 @0
8072 (if (uniform_vector_p (@0))))
8073
8074
8075 (simplify
8076 (vec_perm vec_same_elem_p@0 @0 @1)
8077 @0)
8078
8079 /* Push VEC_PERM earlier if that may help FMA detection (PR101895). */
8080 (simplify
8081 (plus:c (vec_perm:s (mult:c@0 @1 vec_same_elem_p@2) @0 @3) @4)
8082 (if (TREE_CODE (@0) == SSA_NAME && num_imm_uses (@0) == 2)
8083 (plus (mult (vec_perm @1 @1 @3) @2) @4)))
8084 (simplify
8085 (minus (vec_perm:s (mult:c@0 @1 vec_same_elem_p@2) @0 @3) @4)
8086 (if (TREE_CODE (@0) == SSA_NAME && num_imm_uses (@0) == 2)
8087 (minus (mult (vec_perm @1 @1 @3) @2) @4)))
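/* E.g. with s a uniform (splatted) vector:

     VEC_PERM_EXPR <a * s, a * s, m> + c   -->   VEC_PERM_EXPR <a, a, m> * s + c

   which is valid because permuting a multiplication by a uniform vector
   commutes with the multiplication itself, and the new form exposes a
   multiply-add that can become an FMA.  */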
8088
8089
8090 /* Merge
8091 c = VEC_PERM_EXPR <a, b, VCST0>;
8092 d = VEC_PERM_EXPR <c, c, VCST1>;
8093 to
8094 d = VEC_PERM_EXPR <a, b, NEW_VCST>; */
8095
8096 (simplify
8097 (vec_perm (vec_perm@0 @1 @2 VECTOR_CST@3) @0 VECTOR_CST@4)
8098 (if (TYPE_VECTOR_SUBPARTS (type).is_constant ())
8099 (with
8100 {
8101 machine_mode result_mode = TYPE_MODE (type);
8102 machine_mode op_mode = TYPE_MODE (TREE_TYPE (@1));
8103 int nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
8104 vec_perm_builder builder0;
8105 vec_perm_builder builder1;
8106 vec_perm_builder builder2 (nelts, nelts, 1);
8107 }
8108 (if (tree_to_vec_perm_builder (&builder0, @3)
8109 && tree_to_vec_perm_builder (&builder1, @4))
8110 (with
8111 {
8112 vec_perm_indices sel0 (builder0, 2, nelts);
8113 vec_perm_indices sel1 (builder1, 1, nelts);
8114
8115 for (int i = 0; i < nelts; i++)
8116 builder2.quick_push (sel0[sel1[i].to_constant ()]);
8117
8118 vec_perm_indices sel2 (builder2, 2, nelts);
8119
8120 tree op0 = NULL_TREE;
8121 if (can_vec_perm_const_p (result_mode, op_mode, sel2, false))
8122 op0 = vec_perm_indices_to_tree (TREE_TYPE (@4), sel2);
8123 }
8124 (if (op0)
8125 (vec_perm @1 @2 { op0; })))))))
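/* Worked example with 4-element vectors (illustrative masks):

     c = VEC_PERM_EXPR <a, b, { 0, 4, 1, 5 }>;
     d = VEC_PERM_EXPR <c, c, { 1, 0, 3, 2 }>;
   -->
     d = VEC_PERM_EXPR <a, b, { 4, 0, 5, 1 }>;

   i.e. NEW_VCST[i] = VCST0[VCST1[i]], applied only when the target can
   handle the combined permutation.  */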
8126
8127
8128 /* Match a count-trailing-zeroes idiom for simplify_count_trailing_zeroes in
8129 fwprop. The canonical form is array[((x & -x) * C) >> SHIFT], where C is a
8130 magic (de Bruijn) constant whose product with each power of 2 yields a unique
8131 value in the top 5 or 6 bits. That value indexes a table which maps it
8132 to the number of trailing zeroes. */
8133 (match (ctz_table_index @1 @2 @3)
8134 (rshift (mult (bit_and:c (negate @1) @1) INTEGER_CST@2) INTEGER_CST@3))
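/* A source-level sketch of the idiom being matched; the multiplier and the
   table are the well-known 32-bit de Bruijn values, shown purely for
   illustration:

     static const int tab[32] = {
       0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
       31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
     };

     int ctz32 (unsigned int x)
     {
       return tab[((x & -x) * 0x077CB531u) >> 27];
     }

   When the table contents check out, fwprop can replace the whole sequence
   with a CTZ operation if the target provides one.  */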
8135
8136 (match (cond_expr_convert_p @0 @2 @3 @6)
8137 (cond (simple_comparison@6 @0 @1) (convert@4 @2) (convert@5 @3))
8138 (if (INTEGRAL_TYPE_P (type)
8139 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
8140 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
8141 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
8142 && TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (@0))
8143 && TYPE_PRECISION (TREE_TYPE (@0))
8144 == TYPE_PRECISION (TREE_TYPE (@2))
8145 && TYPE_PRECISION (TREE_TYPE (@0))
8146 == TYPE_PRECISION (TREE_TYPE (@3))
8147 /* For vect_recog_cond_expr_convert_pattern, @2 and @3 may differ in
8148 signedness when the conversion is a truncation, but not when it is an
8149 extension, since that would mean sign_extend vs zero_extend. */
8150 && (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type)
8151 || (TYPE_UNSIGNED (TREE_TYPE (@2))
8152 == TYPE_UNSIGNED (TREE_TYPE (@3))))
8153 && single_use (@4)
8154 && single_use (@5))))
8155
8156 (for bit_op (bit_and bit_ior bit_xor)
8157 (match (bitwise_induction_p @0 @2 @3)
8158 (bit_op:c
8159 (nop_convert1? (bit_not2?@0 (convert3? (lshift integer_onep@1 @2))))
8160 @3)))
8161
8162 (match (bitwise_induction_p @0 @2 @3)
8163 (bit_not
8164 (nop_convert1? (bit_xor@0 (convert2? (lshift integer_onep@1 @2)) @3))))
8165
8166 /* n - (((n > C1) ? n : C1) & -C2) -> n & C1 for the unsigned case.
8167 n - (((n > C1) ? n : C1) & -C2) -> (n <= C1) ? n : (n & C1) for the signed case. */
8168 (simplify
8169 (minus @0 (bit_and (max @0 INTEGER_CST@1) INTEGER_CST@2))
8170 (with { auto i = wi::neg (wi::to_wide (@2)); }
8171 /* Check that C2 is a power of 2 and that C1 = C2 - 1 (the mask is -C2). */
8172 (if (wi::popcount (i) == 1
8173 && (wi::to_wide (@1)) == (i - 1))
8174 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
8175 (bit_and @0 @1)
8176 (cond (le @0 @1) @0 (bit_and @0 @1))))))
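/* Worked instance with C1 = 15 and C2 = 16 (so the mask is -16):

     n - (MAX (n, 15) & -16)  -->  n & 15                  (n unsigned)
     n - (MAX (n, 15) & -16)  -->  n <= 15 ? n : (n & 15)  (n signed)

   For n <= 15 the masked MAX is zero, so the result is n itself, which
   n & 15 also gives in the unsigned case; for larger n the subtraction
   strips everything above the low four bits.  */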
8177
8178 /* -x & 1 -> x & 1. */
8179 (simplify
8180 (bit_and (negate @0) integer_onep@1)
8181 (if (!TYPE_OVERFLOW_SANITIZED (type))
8182 (bit_and @0 @1)))