]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/match.pd
rs6000-c.c (P9V_BUILTIN_VEC_XL_LEN_R, [...]): Add support for builtins vector unsigne...
[thirdparty/gcc.git] / gcc / match.pd
CommitLineData
3d2cf79f
RB
1/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
4
cbe34bb5 5 Copyright (C) 2014-2017 Free Software Foundation, Inc.
3d2cf79f
RB
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
8
9This file is part of GCC.
10
11GCC is free software; you can redistribute it and/or modify it under
12the terms of the GNU General Public License as published by the Free
13Software Foundation; either version 3, or (at your option) any later
14version.
15
16GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17WARRANTY; without even the implied warranty of MERCHANTABILITY or
18FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19for more details.
20
21You should have received a copy of the GNU General Public License
22along with GCC; see the file COPYING3. If not see
23<http://www.gnu.org/licenses/>. */
24
25
26/* Generic tree predicates we inherit. */
27(define_predicates
cc7b5acf 28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
53a19317 29 integer_each_onep integer_truep integer_nonzerop
cc7b5acf 30 real_zerop real_onep real_minus_onep
b0eb889b 31 zerop
f3582e54 32 CONSTANT_CLASS_P
887ab609 33 tree_expr_nonnegative_p
e36c1cfe 34 tree_expr_nonzero_p
67dbe582 35 integer_valued_real_p
53a19317
RB
36 integer_pow2p
37 HONOR_NANS)
e0ee10ed 38
f84e7fd6
RB
39/* Operator lists. */
40(define_operator_list tcc_comparison
41 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
42(define_operator_list inverted_tcc_comparison
43 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
44(define_operator_list inverted_tcc_comparison_with_nans
45 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
534bd33b
MG
46(define_operator_list swapped_tcc_comparison
47 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
07cdc2b8
RB
48(define_operator_list simple_comparison lt le eq ne ge gt)
49(define_operator_list swapped_simple_comparison gt ge eq ne le lt)
50
b1dc4a20 51#include "cfn-operators.pd"
257aecb4 52
543a9bcd
RS
53/* Define operand lists for math rounding functions {,i,l,ll}FN,
54 where the versions prefixed with "i" return an int, those prefixed with
55 "l" return a long and those prefixed with "ll" return a long long.
56
57 Also define operand lists:
58
59 X<FN>F for all float functions, in the order i, l, ll
60 X<FN> for all double functions, in the same order
61 X<FN>L for all long double functions, in the same order. */
62#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
543a9bcd
RS
63 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
64 BUILT_IN_L##FN##F \
65 BUILT_IN_LL##FN##F) \
66 (define_operator_list X##FN BUILT_IN_I##FN \
67 BUILT_IN_L##FN \
68 BUILT_IN_LL##FN) \
69 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
70 BUILT_IN_L##FN##L \
71 BUILT_IN_LL##FN##L)
72
543a9bcd
RS
73DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
74DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
75DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
76DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
ed73f46f
MG
77
78/* As opposed to convert?, this still creates a single pattern, so
79 it is not a suitable replacement for convert? in all cases. */
80(match (nop_convert @0)
81 (convert @0)
82 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
83(match (nop_convert @0)
84 (view_convert @0)
85 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
86 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))
87 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
88/* This one has to be last, or it shadows the others. */
89(match (nop_convert @0)
90 @0)
f84e7fd6 91
e0ee10ed 92/* Simplifications of operations with one constant operand and
36a60e48 93 simplifications to constants or single values. */
e0ee10ed
RB
94
95(for op (plus pointer_plus minus bit_ior bit_xor)
96 (simplify
97 (op @0 integer_zerop)
98 (non_lvalue @0)))
99
a499aac5
RB
100/* 0 +p index -> (type)index */
101(simplify
102 (pointer_plus integer_zerop @1)
103 (non_lvalue (convert @1)))
104
a7f24614
RB
105/* See if ARG1 is zero and X + ARG1 reduces to X.
106 Likewise if the operands are reversed. */
107(simplify
108 (plus:c @0 real_zerop@1)
109 (if (fold_real_zero_addition_p (type, @1, 0))
110 (non_lvalue @0)))
111
112/* See if ARG1 is zero and X - ARG1 reduces to X. */
113(simplify
114 (minus @0 real_zerop@1)
115 (if (fold_real_zero_addition_p (type, @1, 1))
116 (non_lvalue @0)))
117
e0ee10ed
RB
118/* Simplify x - x.
119 This is unsafe for certain floats even in non-IEEE formats.
120 In IEEE, it is unsafe because it does wrong for NaNs.
121 Also note that operand_equal_p is always false if an operand
122 is volatile. */
123(simplify
a7f24614 124 (minus @0 @0)
1b457aa4 125 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
a7f24614 126 { build_zero_cst (type); }))
e0ee10ed
RB
127
128(simplify
a7f24614
RB
129 (mult @0 integer_zerop@1)
130 @1)
131
132/* Maybe fold x * 0 to 0. The expressions aren't the same
133 when x is NaN, since x * 0 is also NaN. Nor are they the
134 same in modes with signed zeros, since multiplying a
135 negative value by 0 gives -0, not +0. */
136(simplify
137 (mult @0 real_zerop@1)
8b5ee871 138 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
a7f24614
RB
139 @1))
140
141/* In IEEE floating point, x*1 is not equivalent to x for snans.
142 Likewise for complex arithmetic with signed zeros. */
143(simplify
144 (mult @0 real_onep)
8b5ee871
MG
145 (if (!HONOR_SNANS (type)
146 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
147 || !COMPLEX_FLOAT_TYPE_P (type)))
148 (non_lvalue @0)))
149
150/* Transform x * -1.0 into -x. */
151(simplify
152 (mult @0 real_minus_onep)
8b5ee871
MG
153 (if (!HONOR_SNANS (type)
154 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
155 || !COMPLEX_FLOAT_TYPE_P (type)))
156 (negate @0)))
e0ee10ed 157
8c2805bb
AP
158(for cmp (gt ge lt le)
159 outp (convert convert negate negate)
160 outn (negate negate convert convert)
161 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
162 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
163 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
164 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
165 (simplify
166 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
167 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
168 && types_match (type, TREE_TYPE (@0)))
169 (switch
170 (if (types_match (type, float_type_node))
171 (BUILT_IN_COPYSIGNF @1 (outp @0)))
172 (if (types_match (type, double_type_node))
173 (BUILT_IN_COPYSIGN @1 (outp @0)))
174 (if (types_match (type, long_double_type_node))
175 (BUILT_IN_COPYSIGNL @1 (outp @0))))))
176 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
177 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
178 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
179 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
180 (simplify
181 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
182 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
183 && types_match (type, TREE_TYPE (@0)))
184 (switch
185 (if (types_match (type, float_type_node))
186 (BUILT_IN_COPYSIGNF @1 (outn @0)))
187 (if (types_match (type, double_type_node))
188 (BUILT_IN_COPYSIGN @1 (outn @0)))
189 (if (types_match (type, long_double_type_node))
190 (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
191
192/* Transform X * copysign (1.0, X) into abs(X). */
193(simplify
194 (mult:c @0 (COPYSIGN real_onep @0))
195 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
196 (abs @0)))
197
198/* Transform X * copysign (1.0, -X) into -abs(X). */
199(simplify
200 (mult:c @0 (COPYSIGN real_onep (negate @0)))
201 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
202 (negate (abs @0))))
203
204/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
205(simplify
206 (COPYSIGN REAL_CST@0 @1)
207 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
208 (COPYSIGN (negate @0) @1)))
209
5b7f6ed0 210/* X * 1, X / 1 -> X. */
e0ee10ed
RB
211(for op (mult trunc_div ceil_div floor_div round_div exact_div)
212 (simplify
213 (op @0 integer_onep)
214 (non_lvalue @0)))
215
71f82be9
JG
216/* (A / (1 << B)) -> (A >> B).
217 Only for unsigned A. For signed A, this would not preserve rounding
218 toward zero.
219 For example: (-1 / ( 1 << B)) != -1 >> B. */
220(simplify
221 (trunc_div @0 (lshift integer_onep@1 @2))
222 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
223 && (!VECTOR_TYPE_P (type)
224 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
225 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
226 (rshift @0 @2)))
227
5b7f6ed0
MG
228/* Preserve explicit divisions by 0: the C++ front-end wants to detect
229 undefined behavior in constexpr evaluation, and assuming that the division
230 traps enables better optimizations than these anyway. */
a7f24614 231(for div (trunc_div ceil_div floor_div round_div exact_div)
5b7f6ed0
MG
232 /* 0 / X is always zero. */
233 (simplify
234 (div integer_zerop@0 @1)
235 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
236 (if (!integer_zerop (@1))
237 @0))
da186c1f 238 /* X / -1 is -X. */
a7f24614 239 (simplify
09240451
MG
240 (div @0 integer_minus_onep@1)
241 (if (!TYPE_UNSIGNED (type))
da186c1f 242 (negate @0)))
5b7f6ed0
MG
243 /* X / X is one. */
244 (simplify
245 (div @0 @0)
9ebce098
JJ
246 /* But not for 0 / 0 so that we can get the proper warnings and errors.
247 And not for _Fract types where we can't build 1. */
248 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
5b7f6ed0 249 { build_one_cst (type); }))
da186c1f
RB
250 /* X / abs (X) is X < 0 ? -1 : 1. */
251 (simplify
d96a5585
RB
252 (div:C @0 (abs @0))
253 (if (INTEGRAL_TYPE_P (type)
da186c1f
RB
254 && TYPE_OVERFLOW_UNDEFINED (type))
255 (cond (lt @0 { build_zero_cst (type); })
256 { build_minus_one_cst (type); } { build_one_cst (type); })))
257 /* X / -X is -1. */
258 (simplify
d96a5585 259 (div:C @0 (negate @0))
da186c1f
RB
260 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
261 && TYPE_OVERFLOW_UNDEFINED (type))
262 { build_minus_one_cst (type); })))
a7f24614
RB
263
264/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
265 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
266(simplify
267 (floor_div @0 @1)
09240451
MG
268 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
269 && TYPE_UNSIGNED (type))
a7f24614
RB
270 (trunc_div @0 @1)))
271
28093105
RB
272/* Combine two successive divisions. Note that combining ceil_div
273 and floor_div is trickier and combining round_div even more so. */
274(for div (trunc_div exact_div)
c306cfaf
RB
275 (simplify
276 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
277 (with {
278 bool overflow_p;
279 wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
280 }
281 (if (!overflow_p)
8fdc6c67
RB
282 (div @0 { wide_int_to_tree (type, mul); })
283 (if (TYPE_UNSIGNED (type)
284 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
285 { build_zero_cst (type); })))))
c306cfaf 286
288fe52e
AM
287/* Combine successive multiplications. Similar to above, but handling
288 overflow is different. */
289(simplify
290 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
291 (with {
292 bool overflow_p;
293 wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
294 }
295 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
296 otherwise undefined overflow implies that @0 must be zero. */
297 (if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
298 (mult @0 { wide_int_to_tree (type, mul); }))))
299
a7f24614 300/* Optimize A / A to 1.0 if we don't care about
09240451 301 NaNs or Infinities. */
a7f24614
RB
302(simplify
303 (rdiv @0 @0)
09240451 304 (if (FLOAT_TYPE_P (type)
1b457aa4 305 && ! HONOR_NANS (type)
8b5ee871 306 && ! HONOR_INFINITIES (type))
09240451
MG
307 { build_one_cst (type); }))
308
309/* Optimize -A / A to -1.0 if we don't care about
310 NaNs or Infinities. */
311(simplify
e04d2a35 312 (rdiv:C @0 (negate @0))
09240451 313 (if (FLOAT_TYPE_P (type)
1b457aa4 314 && ! HONOR_NANS (type)
8b5ee871 315 && ! HONOR_INFINITIES (type))
09240451 316 { build_minus_one_cst (type); }))
a7f24614 317
8c6961ca
PK
318/* PR71078: x / abs(x) -> copysign (1.0, x) */
319(simplify
320 (rdiv:C (convert? @0) (convert? (abs @0)))
321 (if (SCALAR_FLOAT_TYPE_P (type)
322 && ! HONOR_NANS (type)
323 && ! HONOR_INFINITIES (type))
324 (switch
325 (if (types_match (type, float_type_node))
326 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
327 (if (types_match (type, double_type_node))
328 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
329 (if (types_match (type, long_double_type_node))
330 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
331
a7f24614
RB
332/* In IEEE floating point, x/1 is not equivalent to x for snans. */
333(simplify
334 (rdiv @0 real_onep)
8b5ee871 335 (if (!HONOR_SNANS (type))
a7f24614
RB
336 (non_lvalue @0)))
337
338/* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
339(simplify
340 (rdiv @0 real_minus_onep)
8b5ee871 341 (if (!HONOR_SNANS (type))
a7f24614
RB
342 (negate @0)))
343
5711ac88
N
344(if (flag_reciprocal_math)
345 /* Convert (A/B)/C to A/(B*C) */
346 (simplify
347 (rdiv (rdiv:s @0 @1) @2)
348 (rdiv @0 (mult @1 @2)))
349
350 /* Convert A/(B/C) to (A/B)*C */
351 (simplify
352 (rdiv @0 (rdiv:s @1 @2))
353 (mult (rdiv @0 @1) @2)))
354
355/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
356(for div (trunc_div ceil_div floor_div round_div exact_div)
357 (simplify
358 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
359 (if (integer_pow2p (@2)
360 && tree_int_cst_sgn (@2) > 0
361 && wi::add (@2, @1) == 0
362 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
363 (rshift (convert @0) { build_int_cst (integer_type_node,
364 wi::exact_log2 (@2)); }))))
365
a7f24614
RB
366/* If ARG1 is a constant, we can convert this to a multiply by the
367 reciprocal. This does not have the same rounding properties,
368 so only do this if -freciprocal-math. We can actually
369 always safely do it if ARG1 is a power of two, but it's hard to
370 tell if it is or not in a portable manner. */
371(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
372 (simplify
373 (rdiv @0 cst@1)
374 (if (optimize)
53bc4b3a
RB
375 (if (flag_reciprocal_math
376 && !real_zerop (@1))
a7f24614 377 (with
249700b5 378 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
a7f24614 379 (if (tem)
8fdc6c67
RB
380 (mult @0 { tem; } )))
381 (if (cst != COMPLEX_CST)
382 (with { tree inverse = exact_inverse (type, @1); }
383 (if (inverse)
384 (mult @0 { inverse; } ))))))))
a7f24614 385
a7f24614 386(for mod (ceil_mod floor_mod round_mod trunc_mod)
e0ee10ed
RB
387 /* 0 % X is always zero. */
388 (simplify
a7f24614 389 (mod integer_zerop@0 @1)
e0ee10ed
RB
390 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
391 (if (!integer_zerop (@1))
392 @0))
393 /* X % 1 is always zero. */
394 (simplify
a7f24614
RB
395 (mod @0 integer_onep)
396 { build_zero_cst (type); })
397 /* X % -1 is zero. */
398 (simplify
09240451
MG
399 (mod @0 integer_minus_onep@1)
400 (if (!TYPE_UNSIGNED (type))
bc4315fb 401 { build_zero_cst (type); }))
5b7f6ed0
MG
402 /* X % X is zero. */
403 (simplify
404 (mod @0 @0)
405 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
406 (if (!integer_zerop (@0))
407 { build_zero_cst (type); }))
bc4315fb
MG
408 /* (X % Y) % Y is just X % Y. */
409 (simplify
410 (mod (mod@2 @0 @1) @1)
98e30e51
RB
411 @2)
412 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
413 (simplify
414 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
415 (if (ANY_INTEGRAL_TYPE_P (type)
416 && TYPE_OVERFLOW_UNDEFINED (type)
417 && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
418 { build_zero_cst (type); })))
a7f24614
RB
419
420/* X % -C is the same as X % C. */
421(simplify
422 (trunc_mod @0 INTEGER_CST@1)
423 (if (TYPE_SIGN (type) == SIGNED
424 && !TREE_OVERFLOW (@1)
425 && wi::neg_p (@1)
426 && !TYPE_OVERFLOW_TRAPS (type)
427 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
428 && !sign_bit_p (@1, @1))
429 (trunc_mod @0 (negate @1))))
e0ee10ed 430
8f0c696a
RB
431/* X % -Y is the same as X % Y. */
432(simplify
433 (trunc_mod @0 (convert? (negate @1)))
a2a743a1
MP
434 (if (INTEGRAL_TYPE_P (type)
435 && !TYPE_UNSIGNED (type)
8f0c696a 436 && !TYPE_OVERFLOW_TRAPS (type)
20b8d734
JJ
437 && tree_nop_conversion_p (type, TREE_TYPE (@1))
438 /* Avoid this transformation if X might be INT_MIN or
439 Y might be -1, because we would then change valid
440 INT_MIN % -(-1) into invalid INT_MIN % -1. */
441 && (expr_not_equal_to (@0, TYPE_MIN_VALUE (type))
442 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
443 (TREE_TYPE (@1))))))
8f0c696a
RB
444 (trunc_mod @0 (convert @1))))
445
f461569a
MP
446/* X - (X / Y) * Y is the same as X % Y. */
447(simplify
2eef1fc1
RB
448 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
449 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
fba46f03 450 (convert (trunc_mod @0 @1))))
f461569a 451
8f0c696a
RB
452/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
453 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
454 Also optimize A % (C << N) where C is a power of 2,
455 to A & ((C << N) - 1). */
456(match (power_of_two_cand @1)
457 INTEGER_CST@1)
458(match (power_of_two_cand @1)
459 (lshift INTEGER_CST@1 @2))
460(for mod (trunc_mod floor_mod)
461 (simplify
4ab1e111 462 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
8f0c696a
RB
463 (if ((TYPE_UNSIGNED (type)
464 || tree_expr_nonnegative_p (@0))
4ab1e111 465 && tree_nop_conversion_p (type, TREE_TYPE (@3))
8f0c696a 466 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
4ab1e111 467 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
8f0c696a 468
887ab609
N
469/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
470(simplify
471 (trunc_div (mult @0 integer_pow2p@1) @1)
472 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
473 (bit_and @0 { wide_int_to_tree
474 (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
475 false, TYPE_PRECISION (type))); })))
476
5f8d832e
N
477/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
478(simplify
479 (mult (trunc_div @0 integer_pow2p@1) @1)
480 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
481 (bit_and @0 (negate @1))))
482
95765f36
N
483/* Simplify (t * 2) / 2) -> t. */
484(for div (trunc_div ceil_div floor_div round_div exact_div)
485 (simplify
486 (div (mult @0 @1) @1)
487 (if (ANY_INTEGRAL_TYPE_P (type)
488 && TYPE_OVERFLOW_UNDEFINED (type))
489 @0)))
490
d202f9bd 491(for op (negate abs)
9b054b08
RS
492 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
493 (for coss (COS COSH)
494 (simplify
495 (coss (op @0))
496 (coss @0)))
497 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
498 (for pows (POW)
499 (simplify
500 (pows (op @0) REAL_CST@1)
501 (with { HOST_WIDE_INT n; }
502 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
5d3498b4 503 (pows @0 @1)))))
de3fbea3
RB
504 /* Likewise for powi. */
505 (for pows (POWI)
506 (simplify
507 (pows (op @0) INTEGER_CST@1)
508 (if (wi::bit_and (@1, 1) == 0)
509 (pows @0 @1))))
5d3498b4
RS
510 /* Strip negate and abs from both operands of hypot. */
511 (for hypots (HYPOT)
512 (simplify
513 (hypots (op @0) @1)
514 (hypots @0 @1))
515 (simplify
516 (hypots @0 (op @1))
517 (hypots @0 @1)))
518 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
519 (for copysigns (COPYSIGN)
520 (simplify
521 (copysigns (op @0) @1)
522 (copysigns @0 @1))))
523
524/* abs(x)*abs(x) -> x*x. Should be valid for all types. */
525(simplify
526 (mult (abs@1 @0) @1)
527 (mult @0 @0))
528
529/* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
530(for coss (COS COSH)
531 copysigns (COPYSIGN)
532 (simplify
533 (coss (copysigns @0 @1))
534 (coss @0)))
535
536/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
537(for pows (POW)
538 copysigns (COPYSIGN)
539 (simplify
de3fbea3 540 (pows (copysigns @0 @2) REAL_CST@1)
5d3498b4
RS
541 (with { HOST_WIDE_INT n; }
542 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
543 (pows @0 @1)))))
de3fbea3
RB
544/* Likewise for powi. */
545(for pows (POWI)
546 copysigns (COPYSIGN)
547 (simplify
548 (pows (copysigns @0 @2) INTEGER_CST@1)
549 (if (wi::bit_and (@1, 1) == 0)
550 (pows @0 @1))))
5d3498b4
RS
551
552(for hypots (HYPOT)
553 copysigns (COPYSIGN)
554 /* hypot(copysign(x, y), z) -> hypot(x, z). */
555 (simplify
556 (hypots (copysigns @0 @1) @2)
557 (hypots @0 @2))
558 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
559 (simplify
560 (hypots @0 (copysigns @1 @2))
561 (hypots @0 @1)))
562
eeb57981
RB
563/* copysign(x, CST) -> [-]abs (x). */
564(for copysigns (COPYSIGN)
565 (simplify
566 (copysigns @0 REAL_CST@1)
567 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
568 (negate (abs @0))
569 (abs @0))))
570
5d3498b4
RS
571/* copysign(copysign(x, y), z) -> copysign(x, z). */
572(for copysigns (COPYSIGN)
573 (simplify
574 (copysigns (copysigns @0 @1) @2)
575 (copysigns @0 @2)))
576
577/* copysign(x,y)*copysign(x,y) -> x*x. */
578(for copysigns (COPYSIGN)
579 (simplify
580 (mult (copysigns@2 @0 @1) @2)
581 (mult @0 @0)))
582
583/* ccos(-x) -> ccos(x). Similarly for ccosh. */
584(for ccoss (CCOS CCOSH)
585 (simplify
586 (ccoss (negate @0))
587 (ccoss @0)))
d202f9bd 588
abcc43f5
RS
589/* cabs(-x) and cos(conj(x)) -> cabs(x). */
590(for ops (conj negate)
591 (for cabss (CABS)
592 (simplify
593 (cabss (ops @0))
594 (cabss @0))))
595
0a8f32b8
RB
596/* Fold (a * (1 << b)) into (a << b) */
597(simplify
598 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
599 (if (! FLOAT_TYPE_P (type)
9ff6fb6e 600 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
0a8f32b8
RB
601 (lshift @0 @2)))
602
603/* Fold (C1/X)*C2 into (C1*C2)/X. */
604(simplify
ff86345f
RB
605 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
606 (if (flag_associative_math
607 && single_use (@3))
0a8f32b8
RB
608 (with
609 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
610 (if (tem)
611 (rdiv { tem; } @1)))))
612
5711ac88
N
613/* Convert C1/(X*C2) into (C1/C2)/X */
614(simplify
615 (rdiv REAL_CST@0 (mult @1 REAL_CST@2))
616 (if (flag_reciprocal_math)
617 (with
618 { tree tem = const_binop (RDIV_EXPR, type, @0, @2); }
619 (if (tem)
620 (rdiv { tem; } @1)))))
621
0a8f32b8
RB
622/* Simplify ~X & X as zero. */
623(simplify
624 (bit_and:c (convert? @0) (convert? (bit_not @0)))
625 { build_zero_cst (type); })
626
89b80c42
PK
627/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
628(simplify
629 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
630 (if (TYPE_UNSIGNED (type))
631 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
632
7aa13860
PK
633(for bitop (bit_and bit_ior)
634 cmp (eq ne)
a93952d2
JJ
635 /* PR35691: Transform
636 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
637 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
7aa13860
PK
638 (simplify
639 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
640 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
a93952d2
JJ
641 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
642 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
643 (cmp (bit_ior @0 (convert @1)) @2)))
644 /* Transform:
645 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
646 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
647 (simplify
648 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
649 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
650 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
651 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
652 (cmp (bit_and @0 (convert @1)) @2))))
7aa13860 653
10158317
RB
654/* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
655(simplify
a9658b11 656 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
10158317
RB
657 (minus (bit_xor @0 @1) @1))
658(simplify
659 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
660 (if (wi::bit_not (@2) == @1)
661 (minus (bit_xor @0 @1) @1)))
662
663/* Fold (A & B) - (A & ~B) into B - (A ^ B). */
664(simplify
a8e9f9a3 665 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
10158317
RB
666 (minus @1 (bit_xor @0 @1)))
667
42bd89ce
MG
668/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
669(for op (bit_ior bit_xor plus)
670 (simplify
671 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
672 (bit_xor @0 @1))
673 (simplify
674 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
675 (if (wi::bit_not (@2) == @1)
676 (bit_xor @0 @1))))
2066ef6a
PK
677
678/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
679(simplify
680 (bit_ior:c (bit_xor:c @0 @1) @0)
681 (bit_ior @0 @1))
682
d982c5b7
MG
683/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
684#if GIMPLE
685(simplify
686 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
687 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
688 && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
689 (bit_xor @0 @1)))
690#endif
10158317 691
bc4315fb
MG
692/* X % Y is smaller than Y. */
693(for cmp (lt ge)
694 (simplify
695 (cmp (trunc_mod @0 @1) @1)
696 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
697 { constant_boolean_node (cmp == LT_EXPR, type); })))
698(for cmp (gt le)
699 (simplify
700 (cmp @1 (trunc_mod @0 @1))
701 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
702 { constant_boolean_node (cmp == GT_EXPR, type); })))
703
e0ee10ed
RB
704/* x | ~0 -> ~0 */
705(simplify
ca0b7ece
RB
706 (bit_ior @0 integer_all_onesp@1)
707 @1)
708
709/* x | 0 -> x */
710(simplify
711 (bit_ior @0 integer_zerop)
712 @0)
e0ee10ed
RB
713
714/* x & 0 -> 0 */
715(simplify
ca0b7ece
RB
716 (bit_and @0 integer_zerop@1)
717 @1)
e0ee10ed 718
a4398a30 719/* ~x | x -> -1 */
8b5ee871
MG
720/* ~x ^ x -> -1 */
721/* ~x + x -> -1 */
722(for op (bit_ior bit_xor plus)
723 (simplify
724 (op:c (convert? @0) (convert? (bit_not @0)))
725 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
a4398a30 726
e0ee10ed
RB
727/* x ^ x -> 0 */
728(simplify
729 (bit_xor @0 @0)
730 { build_zero_cst (type); })
731
36a60e48
RB
732/* Canonicalize X ^ ~0 to ~X. */
733(simplify
734 (bit_xor @0 integer_all_onesp@1)
735 (bit_not @0))
736
737/* x & ~0 -> x */
738(simplify
739 (bit_and @0 integer_all_onesp)
740 (non_lvalue @0))
741
742/* x & x -> x, x | x -> x */
743(for bitop (bit_and bit_ior)
744 (simplify
745 (bitop @0 @0)
746 (non_lvalue @0)))
747
c7986356
MG
748/* x & C -> x if we know that x & ~C == 0. */
749#if GIMPLE
750(simplify
751 (bit_and SSA_NAME@0 INTEGER_CST@1)
752 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
753 && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
754 @0))
755#endif
756
0f770b01
RV
757/* x + (x & 1) -> (x + 1) & ~1 */
758(simplify
44fc0a51
RB
759 (plus:c @0 (bit_and:s @0 integer_onep@1))
760 (bit_and (plus @0 @1) (bit_not @1)))
0f770b01
RV
761
762/* x & ~(x & y) -> x & ~y */
763/* x | ~(x | y) -> x | ~y */
764(for bitop (bit_and bit_ior)
af563d4b 765 (simplify
44fc0a51
RB
766 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
767 (bitop @0 (bit_not @1))))
af563d4b
MG
768
769/* (x | y) & ~x -> y & ~x */
770/* (x & y) | ~x -> y | ~x */
771(for bitop (bit_and bit_ior)
772 rbitop (bit_ior bit_and)
773 (simplify
774 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
775 (bitop @1 @2)))
0f770b01 776
f13c4673
MP
777/* (x & y) ^ (x | y) -> x ^ y */
778(simplify
2d6f2dce
MP
779 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
780 (bit_xor @0 @1))
f13c4673 781
9ea65ca6
MP
782/* (x ^ y) ^ (x | y) -> x & y */
783(simplify
784 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
785 (bit_and @0 @1))
786
787/* (x & y) + (x ^ y) -> x | y */
788/* (x & y) | (x ^ y) -> x | y */
789/* (x & y) ^ (x ^ y) -> x | y */
790(for op (plus bit_ior bit_xor)
791 (simplify
792 (op:c (bit_and @0 @1) (bit_xor @0 @1))
793 (bit_ior @0 @1)))
794
795/* (x & y) + (x | y) -> x + y */
796(simplify
797 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
798 (plus @0 @1))
799
9737efaf
MP
800/* (x + y) - (x | y) -> x & y */
801(simplify
802 (minus (plus @0 @1) (bit_ior @0 @1))
803 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
804 && !TYPE_SATURATING (type))
805 (bit_and @0 @1)))
806
807/* (x + y) - (x & y) -> x | y */
808(simplify
809 (minus (plus @0 @1) (bit_and @0 @1))
810 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
811 && !TYPE_SATURATING (type))
812 (bit_ior @0 @1)))
813
9ea65ca6
MP
814/* (x | y) - (x ^ y) -> x & y */
815(simplify
816 (minus (bit_ior @0 @1) (bit_xor @0 @1))
817 (bit_and @0 @1))
818
819/* (x | y) - (x & y) -> x ^ y */
820(simplify
821 (minus (bit_ior @0 @1) (bit_and @0 @1))
822 (bit_xor @0 @1))
823
66cc6273
MP
824/* (x | y) & ~(x & y) -> x ^ y */
825(simplify
826 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
827 (bit_xor @0 @1))
828
829/* (x | y) & (~x ^ y) -> x & y */
830(simplify
831 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
832 (bit_and @0 @1))
833
5b00d921
RB
834/* ~x & ~y -> ~(x | y)
835 ~x | ~y -> ~(x & y) */
836(for op (bit_and bit_ior)
837 rop (bit_ior bit_and)
838 (simplify
839 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
ece46666
MG
840 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
841 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
5b00d921
RB
842 (bit_not (rop (convert @0) (convert @1))))))
843
14ea9f92 844/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
5b00d921
RB
845 with a constant, and the two constants have no bits in common,
846 we should treat this as a BIT_IOR_EXPR since this may produce more
847 simplifications. */
14ea9f92
RB
848(for op (bit_xor plus)
849 (simplify
850 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
851 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
852 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
853 && tree_nop_conversion_p (type, TREE_TYPE (@2))
854 && wi::bit_and (@1, @3) == 0)
855 (bit_ior (convert @4) (convert @5)))))
5b00d921
RB
856
857/* (X | Y) ^ X -> Y & ~ X*/
858(simplify
2eef1fc1 859 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
5b00d921
RB
860 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
861 (convert (bit_and @1 (bit_not @0)))))
862
863/* Convert ~X ^ ~Y to X ^ Y. */
864(simplify
865 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
ece46666
MG
866 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
867 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
5b00d921
RB
868 (bit_xor (convert @0) (convert @1))))
869
870/* Convert ~X ^ C to X ^ ~C. */
871(simplify
872 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
c8ba6498
EB
873 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
874 (bit_xor (convert @0) (bit_not @1))))
5b00d921 875
e39dab2c
MG
876/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
877(for opo (bit_and bit_xor)
878 opi (bit_xor bit_and)
879 (simplify
880 (opo:c (opi:c @0 @1) @1)
881 (bit_and (bit_not @0) @1)))
97e77391 882
14ea9f92
RB
883/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
884 operands are another bit-wise operation with a common input. If so,
885 distribute the bit operations to save an operation and possibly two if
886 constants are involved. For example, convert
887 (A | B) & (A | C) into A | (B & C)
888 Further simplification will occur if B and C are constants. */
e07ab2fe
MG
889(for op (bit_and bit_ior bit_xor)
890 rop (bit_ior bit_and bit_and)
14ea9f92 891 (simplify
2eef1fc1 892 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
e07ab2fe
MG
893 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
894 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
14ea9f92
RB
895 (rop (convert @0) (op (convert @1) (convert @2))))))
896
e39dab2c
MG
897/* Some simple reassociation for bit operations, also handled in reassoc. */
898/* (X & Y) & Y -> X & Y
899 (X | Y) | Y -> X | Y */
900(for op (bit_and bit_ior)
901 (simplify
2eef1fc1 902 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
e39dab2c
MG
903 @2))
904/* (X ^ Y) ^ Y -> X */
905(simplify
2eef1fc1 906 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
ece46666 907 (convert @0))
e39dab2c
MG
908/* (X & Y) & (X & Z) -> (X & Y) & Z
909 (X | Y) | (X | Z) -> (X | Y) | Z */
910(for op (bit_and bit_ior)
911 (simplify
6c35e5b0 912 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
e39dab2c
MG
913 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
914 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
915 (if (single_use (@5) && single_use (@6))
916 (op @3 (convert @2))
917 (if (single_use (@3) && single_use (@4))
918 (op (convert @1) @5))))))
919/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
920(simplify
921 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
922 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
923 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
d78789f5 924 (bit_xor (convert @1) (convert @2))))
5b00d921 925
b14a9c57
RB
926(simplify
927 (abs (abs@1 @0))
928 @1)
f3582e54
RB
929(simplify
930 (abs (negate @0))
931 (abs @0))
932(simplify
933 (abs tree_expr_nonnegative_p@0)
934 @0)
935
55cf3946
RB
936/* A few cases of fold-const.c negate_expr_p predicate. */
937(match negate_expr_p
938 INTEGER_CST
b14a9c57 939 (if ((INTEGRAL_TYPE_P (type)
56a6d474 940 && TYPE_UNSIGNED (type))
b14a9c57 941 || (!TYPE_OVERFLOW_SANITIZED (type)
55cf3946
RB
942 && may_negate_without_overflow_p (t)))))
943(match negate_expr_p
944 FIXED_CST)
945(match negate_expr_p
946 (negate @0)
947 (if (!TYPE_OVERFLOW_SANITIZED (type))))
948(match negate_expr_p
949 REAL_CST
950 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
951/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
952 ways. */
953(match negate_expr_p
954 VECTOR_CST
955 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
0a8f32b8
RB
956
957/* (-A) * (-B) -> A * B */
958(simplify
959 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
960 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
961 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
962 (mult (convert @0) (convert (negate @1)))))
55cf3946
RB
963
964/* -(A + B) -> (-B) - A. */
b14a9c57 965(simplify
55cf3946
RB
966 (negate (plus:c @0 negate_expr_p@1))
967 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
968 && !HONOR_SIGNED_ZEROS (element_mode (type)))
969 (minus (negate @1) @0)))
970
971/* A - B -> A + (-B) if B is easily negatable. */
b14a9c57 972(simplify
55cf3946 973 (minus @0 negate_expr_p@1)
e4e96a4f
KT
974 (if (!FIXED_POINT_TYPE_P (type))
975 (plus @0 (negate @1))))
d4573ffe 976
5609420f
RB
977/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
978 when profitable.
979 For bitwise binary operations apply operand conversions to the
980 binary operation result instead of to the operands. This allows
981 to combine successive conversions and bitwise binary operations.
982 We combine the above two cases by using a conditional convert. */
983(for bitop (bit_and bit_ior bit_xor)
984 (simplify
985 (bitop (convert @0) (convert? @1))
986 (if (((TREE_CODE (@1) == INTEGER_CST
987 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
ad6f996c 988 && int_fits_type_p (@1, TREE_TYPE (@0)))
aea417d7 989 || types_match (@0, @1))
ad6f996c
RB
990 /* ??? This transform conflicts with fold-const.c doing
991 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
992 constants (if x has signed type, the sign bit cannot be set
993 in c). This folds extension into the BIT_AND_EXPR.
994 Restrict it to GIMPLE to avoid endless recursions. */
995 && (bitop != BIT_AND_EXPR || GIMPLE)
5609420f
RB
996 && (/* That's a good idea if the conversion widens the operand, thus
997 after hoisting the conversion the operation will be narrower. */
998 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
999 /* It's also a good idea if the conversion is to a non-integer
1000 mode. */
1001 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
1002 /* Or if the precision of TO is not the same as the precision
1003 of its mode. */
2be65d9e 1004 || !type_has_mode_precision_p (type)))
5609420f
RB
1005 (convert (bitop @0 (convert @1))))))
1006
b14a9c57
RB
1007(for bitop (bit_and bit_ior)
1008 rbitop (bit_ior bit_and)
1009 /* (x | y) & x -> x */
1010 /* (x & y) | x -> x */
1011 (simplify
1012 (bitop:c (rbitop:c @0 @1) @0)
1013 @0)
1014 /* (~x | y) & x -> x & y */
1015 /* (~x & y) | x -> x | y */
1016 (simplify
1017 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1018 (bitop @0 @1)))
1019
5609420f
RB
1020/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1021(simplify
1022 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1023 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
1024
1025/* Combine successive equal operations with constants. */
1026(for bitop (bit_and bit_ior bit_xor)
1027 (simplify
1028 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1029 (bitop @0 (bitop @1 @2))))
1030
1031/* Try simple folding for X op !X, and X op X with the help
1032 of the truth_valued_p and logical_inverted_value predicates. */
1033(match truth_valued_p
1034 @0
1035 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
f84e7fd6 1036(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
5609420f
RB
1037 (match truth_valued_p
1038 (op @0 @1)))
1039(match truth_valued_p
1040 (truth_not @0))
1041
0a8f32b8
RB
1042(match (logical_inverted_value @0)
1043 (truth_not @0))
5609420f
RB
1044(match (logical_inverted_value @0)
1045 (bit_not truth_valued_p@0))
1046(match (logical_inverted_value @0)
09240451 1047 (eq @0 integer_zerop))
5609420f 1048(match (logical_inverted_value @0)
09240451 1049 (ne truth_valued_p@0 integer_truep))
5609420f 1050(match (logical_inverted_value @0)
09240451 1051 (bit_xor truth_valued_p@0 integer_truep))
5609420f
RB
1052
1053/* X & !X -> 0. */
1054(simplify
1055 (bit_and:c @0 (logical_inverted_value @0))
1056 { build_zero_cst (type); })
1057/* X | !X and X ^ !X -> 1, , if X is truth-valued. */
1058(for op (bit_ior bit_xor)
1059 (simplify
1060 (op:c truth_valued_p@0 (logical_inverted_value @0))
f84e7fd6 1061 { constant_boolean_node (true, type); }))
59c20dc7
RB
1062/* X ==/!= !X is false/true. */
1063(for op (eq ne)
1064 (simplify
1065 (op:c truth_valued_p@0 (logical_inverted_value @0))
1066 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
5609420f 1067
5609420f
RB
1068/* ~~x -> x */
1069(simplify
1070 (bit_not (bit_not @0))
1071 @0)
1072
b14a9c57
RB
1073/* Convert ~ (-A) to A - 1. */
1074(simplify
1075 (bit_not (convert? (negate @0)))
ece46666
MG
1076 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1077 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
8b5ee871 1078 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
b14a9c57
RB
1079
1080/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1081(simplify
8b5ee871 1082 (bit_not (convert? (minus @0 integer_each_onep)))
ece46666
MG
1083 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1084 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
1085 (convert (negate @0))))
1086(simplify
1087 (bit_not (convert? (plus @0 integer_all_onesp)))
ece46666
MG
1088 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1089 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
1090 (convert (negate @0))))
1091
1092/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
1093(simplify
1094 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1095 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1096 (convert (bit_xor @0 (bit_not @1)))))
1097(simplify
1098 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1099 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1100 (convert (bit_xor @0 @1))))
1101
f52baa7b
MP
1102/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1103(simplify
44fc0a51
RB
1104 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1105 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
f52baa7b 1106
f7b7b0aa
MP
1107/* Fold A - (A & B) into ~B & A. */
1108(simplify
2eef1fc1 1109 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
f7b7b0aa
MP
1110 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1111 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1112 (convert (bit_and (bit_not @1) @0))))
5609420f 1113
2071f8f9
N
1114/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
1115(for cmp (gt lt ge le)
1116(simplify
1117 (mult (convert (cmp @0 @1)) @2)
1118 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))
1119
e36c1cfe
N
1120/* For integral types with undefined overflow and C != 0 fold
1121 x * C EQ/NE y * C into x EQ/NE y. */
1122(for cmp (eq ne)
1123 (simplify
1124 (cmp (mult:c @0 @1) (mult:c @2 @1))
1125 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1126 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1127 && tree_expr_nonzero_p (@1))
1128 (cmp @0 @2))))
1129
42bd89ce
MG
1130/* For integral types with wrapping overflow and C odd fold
1131 x * C EQ/NE y * C into x EQ/NE y. */
1132(for cmp (eq ne)
1133 (simplify
1134 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1135 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1136 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1137 && (TREE_INT_CST_LOW (@1) & 1) != 0)
1138 (cmp @0 @2))))
1139
e36c1cfe
N
1140/* For integral types with undefined overflow and C != 0 fold
1141 x * C RELOP y * C into:
84ff66b8 1142
e36c1cfe
N
1143 x RELOP y for nonnegative C
1144 y RELOP x for negative C */
1145(for cmp (lt gt le ge)
1146 (simplify
1147 (cmp (mult:c @0 @1) (mult:c @2 @1))
1148 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1149 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1150 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1151 (cmp @0 @2)
1152 (if (TREE_CODE (@1) == INTEGER_CST
1153 && wi::neg_p (@1, TYPE_SIGN (TREE_TYPE (@1))))
1154 (cmp @2 @0))))))
84ff66b8 1155
564e405c
JJ
1156/* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
1157(for cmp (le gt)
1158 icmp (gt le)
1159 (simplify
1160 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1161 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1162 && TYPE_UNSIGNED (TREE_TYPE (@0))
1163 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
1164 && wi::eq_p (@2, wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)),
1165 SIGNED) - 1))
1166 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1167 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
1168
a8492d5e
MG
1169/* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1170(for cmp (simple_comparison)
1171 (simplify
1172 (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
1173 (if (wi::gt_p(@2, 0, TYPE_SIGN (TREE_TYPE (@2))))
1174 (cmp @0 @1))))
1175
8d1628eb
JJ
1176/* X / C1 op C2 into a simple range test. */
1177(for cmp (simple_comparison)
1178 (simplify
1179 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1180 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1181 && integer_nonzerop (@1)
1182 && !TREE_OVERFLOW (@1)
1183 && !TREE_OVERFLOW (@2))
1184 (with { tree lo, hi; bool neg_overflow;
1185 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1186 &neg_overflow); }
1187 (switch
1188 (if (code == LT_EXPR || code == GE_EXPR)
1189 (if (TREE_OVERFLOW (lo))
1190 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1191 (if (code == LT_EXPR)
1192 (lt @0 { lo; })
1193 (ge @0 { lo; }))))
1194 (if (code == LE_EXPR || code == GT_EXPR)
1195 (if (TREE_OVERFLOW (hi))
1196 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1197 (if (code == LE_EXPR)
1198 (le @0 { hi; })
1199 (gt @0 { hi; }))))
1200 (if (!lo && !hi)
1201 { build_int_cst (type, code == NE_EXPR); })
1202 (if (code == EQ_EXPR && !hi)
1203 (ge @0 { lo; }))
1204 (if (code == EQ_EXPR && !lo)
1205 (le @0 { hi; }))
1206 (if (code == NE_EXPR && !hi)
1207 (lt @0 { lo; }))
1208 (if (code == NE_EXPR && !lo)
1209 (gt @0 { hi; }))
1210 (if (GENERIC)
1211 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1212 lo, hi); })
1213 (with
1214 {
1215 tree etype = range_check_type (TREE_TYPE (@0));
1216 if (etype)
1217 {
1218 if (! TYPE_UNSIGNED (etype))
1219 etype = unsigned_type_for (etype);
1220 hi = fold_convert (etype, hi);
1221 lo = fold_convert (etype, lo);
1222 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1223 }
1224 }
1225 (if (etype && hi && !TREE_OVERFLOW (hi))
1226 (if (code == EQ_EXPR)
1227 (le (minus (convert:etype @0) { lo; }) { hi; })
1228 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
1229
d35256b6
MG
1230/* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1231(for op (lt le ge gt)
1232 (simplify
1233 (op (plus:c @0 @2) (plus:c @1 @2))
1234 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1235 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1236 (op @0 @1))))
1237/* For equality and subtraction, this is also true with wrapping overflow. */
1238(for op (eq ne minus)
1239 (simplify
1240 (op (plus:c @0 @2) (plus:c @1 @2))
1241 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1242 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1243 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1244 (op @0 @1))))
1245
1246/* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1247(for op (lt le ge gt)
1248 (simplify
1249 (op (minus @0 @2) (minus @1 @2))
1250 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1251 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1252 (op @0 @1))))
1253/* For equality and subtraction, this is also true with wrapping overflow. */
1254(for op (eq ne minus)
1255 (simplify
1256 (op (minus @0 @2) (minus @1 @2))
1257 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1258 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1259 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1260 (op @0 @1))))
1261
1262/* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1263(for op (lt le ge gt)
1264 (simplify
1265 (op (minus @2 @0) (minus @2 @1))
1266 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1267 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1268 (op @1 @0))))
1269/* For equality and subtraction, this is also true with wrapping overflow. */
1270(for op (eq ne minus)
1271 (simplify
1272 (op (minus @2 @0) (minus @2 @1))
1273 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1274 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1275 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1276 (op @1 @0))))
1277
8b656ca7
MG
1278/* X == C - X can never be true if C is odd. */
1279(for cmp (eq ne)
1280 (simplify
1281 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1282 (if (TREE_INT_CST_LOW (@1) & 1)
1283 { constant_boolean_node (cmp == NE_EXPR, type); })))
1284
10bc8017
MG
1285/* Arguments on which one can call get_nonzero_bits to get the bits
1286 possibly set. */
1287(match with_possible_nonzero_bits
1288 INTEGER_CST@0)
1289(match with_possible_nonzero_bits
1290 SSA_NAME@0
1291 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1292/* Slightly extended version, do not make it recursive to keep it cheap. */
1293(match (with_possible_nonzero_bits2 @0)
1294 with_possible_nonzero_bits@0)
1295(match (with_possible_nonzero_bits2 @0)
1296 (bit_and:c with_possible_nonzero_bits@0 @2))
1297
1298/* Same for bits that are known to be set, but we do not have
1299 an equivalent to get_nonzero_bits yet. */
1300(match (with_certain_nonzero_bits2 @0)
1301 INTEGER_CST@0)
1302(match (with_certain_nonzero_bits2 @0)
1303 (bit_ior @1 INTEGER_CST@0))
1304
1305/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1306(for cmp (eq ne)
1307 (simplify
1308 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1309 (if ((~get_nonzero_bits (@0) & @1) != 0)
1310 { constant_boolean_node (cmp == NE_EXPR, type); })))
1311
84ff66b8
AV
1312/* ((X inner_op C0) outer_op C1)
1313 With X being a tree where value_range has reasoned certain bits to always be
1314 zero throughout its computed value range,
1315 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
1316 where zero_mask has 1's for all bits that are sure to be 0 in
1317 and 0's otherwise.
1318 if (inner_op == '^') C0 &= ~C1;
1319 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)
1320 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)
1321*/
1322(for inner_op (bit_ior bit_xor)
1323 outer_op (bit_xor bit_ior)
1324(simplify
1325 (outer_op
1326 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1327 (with
1328 {
1329 bool fail = false;
1330 wide_int zero_mask_not;
1331 wide_int C0;
1332 wide_int cst_emit;
1333
1334 if (TREE_CODE (@2) == SSA_NAME)
1335 zero_mask_not = get_nonzero_bits (@2);
1336 else
1337 fail = true;
1338
1339 if (inner_op == BIT_XOR_EXPR)
1340 {
1341 C0 = wi::bit_and_not (@0, @1);
1342 cst_emit = wi::bit_or (C0, @1);
1343 }
1344 else
1345 {
1346 C0 = @0;
1347 cst_emit = wi::bit_xor (@0, @1);
1348 }
1349 }
1350 (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
1351 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1352 (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
1353 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
1354
a499aac5
RB
1355/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1356(simplify
44fc0a51
RB
1357 (pointer_plus (pointer_plus:s @0 @1) @3)
1358 (pointer_plus @0 (plus @1 @3)))
a499aac5
RB
1359
1360/* Pattern match
1361 tem1 = (long) ptr1;
1362 tem2 = (long) ptr2;
1363 tem3 = tem2 - tem1;
1364 tem4 = (unsigned long) tem3;
1365 tem5 = ptr1 + tem4;
1366 and produce
1367 tem5 = ptr2; */
1368(simplify
1369 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1370 /* Conditionally look through a sign-changing conversion. */
1371 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1372 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1373 || (GENERIC && type == TREE_TYPE (@1))))
1374 @1))
1375
1376/* Pattern match
1377 tem = (sizetype) ptr;
1378 tem = tem & algn;
1379 tem = -tem;
1380 ... = ptr p+ tem;
1381 and produce the simpler and easier to analyze with respect to alignment
1382 ... = ptr & ~algn; */
1383(simplify
1384 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1385 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
1386 (bit_and @0 { algn; })))
1387
99e943a2
RB
1388/* Try folding difference of addresses. */
1389(simplify
1390 (minus (convert ADDR_EXPR@0) (convert @1))
1391 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1392 (with { HOST_WIDE_INT diff; }
1393 (if (ptr_difference_const (@0, @1, &diff))
1394 { build_int_cst_type (type, diff); }))))
1395(simplify
1396 (minus (convert @0) (convert ADDR_EXPR@1))
1397 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1398 (with { HOST_WIDE_INT diff; }
1399 (if (ptr_difference_const (@0, @1, &diff))
1400 { build_int_cst_type (type, diff); }))))
1401
bab73f11
RB
1402/* If arg0 is derived from the address of an object or function, we may
1403 be able to fold this expression using the object or function's
1404 alignment. */
1405(simplify
1406 (bit_and (convert? @0) INTEGER_CST@1)
1407 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1408 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1409 (with
1410 {
1411 unsigned int align;
1412 unsigned HOST_WIDE_INT bitpos;
1413 get_pointer_alignment_1 (@0, &align, &bitpos);
1414 }
1415 (if (wi::ltu_p (@1, align / BITS_PER_UNIT))
1416 { wide_int_to_tree (type, wi::bit_and (@1, bitpos / BITS_PER_UNIT)); }))))
99e943a2 1417
a499aac5 1418
cc7b5acf
RB
1419/* We can't reassociate at all for saturating types. */
1420(if (!TYPE_SATURATING (type))
1421
1422 /* Contract negates. */
1423 /* A + (-B) -> A - B */
1424 (simplify
248179b5
RB
1425 (plus:c @0 (convert? (negate @1)))
1426 /* Apply STRIP_NOPS on the negate. */
1427 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1428 && !TYPE_OVERFLOW_SANITIZED (type))
248179b5
RB
1429 (with
1430 {
1431 tree t1 = type;
1432 if (INTEGRAL_TYPE_P (type)
1433 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1434 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1435 }
1436 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
cc7b5acf
RB
1437 /* A - (-B) -> A + B */
1438 (simplify
248179b5
RB
1439 (minus @0 (convert? (negate @1)))
1440 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1441 && !TYPE_OVERFLOW_SANITIZED (type))
248179b5
RB
1442 (with
1443 {
1444 tree t1 = type;
1445 if (INTEGRAL_TYPE_P (type)
1446 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1447 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1448 }
1449 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
cc7b5acf
RB
1450 /* -(-A) -> A */
1451 (simplify
1452 (negate (convert? (negate @1)))
1453 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1454 && !TYPE_OVERFLOW_SANITIZED (type))
a0f12cf8 1455 (convert @1)))
cc7b5acf 1456
7318e44f
RB
1457 /* We can't reassociate floating-point unless -fassociative-math
1458 or fixed-point plus or minus because of saturation to +-Inf. */
1459 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1460 && !FIXED_POINT_TYPE_P (type))
cc7b5acf
RB
1461
1462 /* Match patterns that allow contracting a plus-minus pair
1463 irrespective of overflow issues. */
1464 /* (A +- B) - A -> +- B */
1465 /* (A +- B) -+ B -> A */
1466 /* A - (A +- B) -> -+ B */
1467 /* A +- (B -+ A) -> +- B */
1468 (simplify
1469 (minus (plus:c @0 @1) @0)
1470 @1)
1471 (simplify
1472 (minus (minus @0 @1) @0)
1473 (negate @1))
1474 (simplify
1475 (plus:c (minus @0 @1) @1)
1476 @0)
1477 (simplify
1478 (minus @0 (plus:c @0 @1))
1479 (negate @1))
1480 (simplify
1481 (minus @0 (minus @0 @1))
1482 @1)
1e7df2e6
MG
1483 /* (A +- B) + (C - A) -> C +- B */
1484 /* (A + B) - (A - C) -> B + C */
1485 /* More cases are handled with comparisons. */
1486 (simplify
1487 (plus:c (plus:c @0 @1) (minus @2 @0))
1488 (plus @2 @1))
1489 (simplify
1490 (plus:c (minus @0 @1) (minus @2 @0))
1491 (minus @2 @1))
1492 (simplify
1493 (minus (plus:c @0 @1) (minus @0 @2))
1494 (plus @1 @2))
cc7b5acf 1495
ed73f46f
MG
1496 /* (A +- CST1) +- CST2 -> A + CST3
1497 Use view_convert because it is safe for vectors and equivalent for
1498 scalars. */
cc7b5acf
RB
1499 (for outer_op (plus minus)
1500 (for inner_op (plus minus)
ed73f46f 1501 neg_inner_op (minus plus)
cc7b5acf 1502 (simplify
ed73f46f
MG
1503 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1504 CONSTANT_CLASS_P@2)
1505 /* If one of the types wraps, use that one. */
1506 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
1507 (if (outer_op == PLUS_EXPR)
1508 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1509 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1))))
1510 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1511 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1512 (if (outer_op == PLUS_EXPR)
1513 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1514 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1515 /* If the constant operation overflows we cannot do the transform
1516 directly as we would introduce undefined overflow, for example
1517 with (a - 1) + INT_MIN. */
1518 (if (types_match (type, @0))
1519 (with { tree cst = const_binop (outer_op == inner_op
1520 ? PLUS_EXPR : MINUS_EXPR,
1521 type, @1, @2); }
1522 (if (cst && !TREE_OVERFLOW (cst))
1523 (inner_op @0 { cst; } )
1524 /* X+INT_MAX+1 is X-INT_MIN. */
1525 (if (INTEGRAL_TYPE_P (type) && cst
1526 && wi::eq_p (cst, wi::min_value (type)))
1527 (neg_inner_op @0 { wide_int_to_tree (type, cst); })
1528 /* Last resort, use some unsigned type. */
1529 (with { tree utype = unsigned_type_for (type); }
1530 (view_convert (inner_op
1531 (view_convert:utype @0)
1532 (view_convert:utype
1533 { drop_tree_overflow (cst); })))))))))))))
cc7b5acf 1534
b302f2e0 1535 /* (CST1 - A) +- CST2 -> CST3 - A */
cc7b5acf
RB
1536 (for outer_op (plus minus)
1537 (simplify
1538 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
23f27839 1539 (with { tree cst = const_binop (outer_op, type, @1, @2); }
cc7b5acf
RB
1540 (if (cst && !TREE_OVERFLOW (cst))
1541 (minus { cst; } @0)))))
1542
b302f2e0
RB
1543 /* CST1 - (CST2 - A) -> CST3 + A */
1544 (simplify
1545 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1546 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1547 (if (cst && !TREE_OVERFLOW (cst))
1548 (plus { cst; } @0))))
1549
cc7b5acf
RB
1550 /* ~A + A -> -1 */
1551 (simplify
1552 (plus:c (bit_not @0) @0)
1553 (if (!TYPE_OVERFLOW_TRAPS (type))
1554 { build_all_ones_cst (type); }))
1555
1556 /* ~A + 1 -> -A */
1557 (simplify
e19740ae
RB
1558 (plus (convert? (bit_not @0)) integer_each_onep)
1559 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1560 (negate (convert @0))))
1561
1562 /* -A - 1 -> ~A */
1563 (simplify
1564 (minus (convert? (negate @0)) integer_each_onep)
1565 (if (!TYPE_OVERFLOW_TRAPS (type)
1566 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1567 (bit_not (convert @0))))
1568
1569 /* -1 - A -> ~A */
1570 (simplify
1571 (minus integer_all_onesp @0)
bc4315fb 1572 (bit_not @0))
cc7b5acf
RB
1573
1574 /* (T)(P + A) - (T)P -> (T) A */
1575 (for add (plus pointer_plus)
1576 (simplify
2eef1fc1 1577 (minus (convert (add @@0 @1))
cc7b5acf 1578 (convert @0))
09240451 1579 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
cc7b5acf
RB
1580 /* For integer types, if A has a smaller type
1581 than T the result depends on the possible
1582 overflow in P + A.
1583 E.g. T=size_t, A=(unsigned)429497295, P>0.
1584 However, if an overflow in P + A would cause
1585 undefined behavior, we can assume that there
1586 is no overflow. */
1587 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1588 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1589 /* For pointer types, if the conversion of A to the
1590 final type requires a sign- or zero-extension,
1591 then we have to punt - it is not defined which
1592 one is correct. */
1593 || (POINTER_TYPE_P (TREE_TYPE (@0))
1594 && TREE_CODE (@1) == INTEGER_CST
1595 && tree_int_cst_sign_bit (@1) == 0))
a8fc2579
RB
1596 (convert @1))))
1597
1598 /* (T)P - (T)(P + A) -> -(T) A */
1599 (for add (plus pointer_plus)
1600 (simplify
1601 (minus (convert @0)
2eef1fc1 1602 (convert (add @@0 @1)))
a8fc2579
RB
1603 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1604 /* For integer types, if A has a smaller type
1605 than T the result depends on the possible
1606 overflow in P + A.
1607 E.g. T=size_t, A=(unsigned)429497295, P>0.
1608 However, if an overflow in P + A would cause
1609 undefined behavior, we can assume that there
1610 is no overflow. */
1611 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1612 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1613 /* For pointer types, if the conversion of A to the
1614 final type requires a sign- or zero-extension,
1615 then we have to punt - it is not defined which
1616 one is correct. */
1617 || (POINTER_TYPE_P (TREE_TYPE (@0))
1618 && TREE_CODE (@1) == INTEGER_CST
1619 && tree_int_cst_sign_bit (@1) == 0))
1620 (negate (convert @1)))))
1621
1622 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
1623 (for add (plus pointer_plus)
1624 (simplify
2eef1fc1 1625 (minus (convert (add @@0 @1))
a8fc2579
RB
1626 (convert (add @0 @2)))
1627 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1628 /* For integer types, if A has a smaller type
1629 than T the result depends on the possible
1630 overflow in P + A.
1631 E.g. T=size_t, A=(unsigned)429497295, P>0.
1632 However, if an overflow in P + A would cause
1633 undefined behavior, we can assume that there
1634 is no overflow. */
1635 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1636 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1637 /* For pointer types, if the conversion of A to the
1638 final type requires a sign- or zero-extension,
1639 then we have to punt - it is not defined which
1640 one is correct. */
1641 || (POINTER_TYPE_P (TREE_TYPE (@0))
1642 && TREE_CODE (@1) == INTEGER_CST
1643 && tree_int_cst_sign_bit (@1) == 0
1644 && TREE_CODE (@2) == INTEGER_CST
1645 && tree_int_cst_sign_bit (@2) == 0))
1646 (minus (convert @1) (convert @2)))))))
cc7b5acf
RB
1647
1648
0122e8e5 1649/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
a7f24614 1650
0122e8e5 1651(for minmax (min max FMIN FMAX)
a7f24614
RB
1652 (simplify
1653 (minmax @0 @0)
1654 @0))
4a334cba
RS
1655/* min(max(x,y),y) -> y. */
1656(simplify
1657 (min:c (max:c @0 @1) @1)
1658 @1)
1659/* max(min(x,y),y) -> y. */
1660(simplify
1661 (max:c (min:c @0 @1) @1)
1662 @1)
d657e995
RB
1663/* max(a,-a) -> abs(a). */
1664(simplify
1665 (max:c @0 (negate @0))
1666 (if (TREE_CODE (type) != COMPLEX_TYPE
1667 && (! ANY_INTEGRAL_TYPE_P (type)
1668 || TYPE_OVERFLOW_UNDEFINED (type)))
1669 (abs @0)))
54f84ca9
RB
1670/* min(a,-a) -> -abs(a). */
1671(simplify
1672 (min:c @0 (negate @0))
1673 (if (TREE_CODE (type) != COMPLEX_TYPE
1674 && (! ANY_INTEGRAL_TYPE_P (type)
1675 || TYPE_OVERFLOW_UNDEFINED (type)))
1676 (negate (abs @0))))
a7f24614
RB
1677(simplify
1678 (min @0 @1)
2c2870a1
MG
1679 (switch
1680 (if (INTEGRAL_TYPE_P (type)
1681 && TYPE_MIN_VALUE (type)
1682 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1683 @1)
1684 (if (INTEGRAL_TYPE_P (type)
1685 && TYPE_MAX_VALUE (type)
1686 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1687 @0)))
a7f24614
RB
1688(simplify
1689 (max @0 @1)
2c2870a1
MG
1690 (switch
1691 (if (INTEGRAL_TYPE_P (type)
1692 && TYPE_MAX_VALUE (type)
1693 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1694 @1)
1695 (if (INTEGRAL_TYPE_P (type)
1696 && TYPE_MIN_VALUE (type)
1697 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1698 @0)))
ad6e4ba8 1699
182f37c9
N
1700/* max (a, a + CST) -> a + CST where CST is positive. */
1701/* max (a, a + CST) -> a where CST is negative. */
1702(simplify
1703 (max:c @0 (plus@2 @0 INTEGER_CST@1))
1704 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1705 (if (tree_int_cst_sgn (@1) > 0)
1706 @2
1707 @0)))
1708
1709/* min (a, a + CST) -> a where CST is positive. */
1710/* min (a, a + CST) -> a + CST where CST is negative. */
1711(simplify
1712 (min:c @0 (plus@2 @0 INTEGER_CST@1))
1713 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1714 (if (tree_int_cst_sgn (@1) > 0)
1715 @0
1716 @2)))
1717
ad6e4ba8
BC
1718/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
1719 and the outer convert demotes the expression back to x's type. */
1720(for minmax (min max)
1721 (simplify
1722 (convert (minmax@0 (convert @1) INTEGER_CST@2))
ebf41734
BC
1723 (if (INTEGRAL_TYPE_P (type)
1724 && types_match (@1, type) && int_fits_type_p (@2, type)
ad6e4ba8
BC
1725 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
1726 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
1727 (minmax @1 (convert @2)))))
1728
0122e8e5
RS
1729(for minmax (FMIN FMAX)
1730 /* If either argument is NaN, return the other one. Avoid the
1731 transformation if we get (and honor) a signalling NaN. */
1732 (simplify
1733 (minmax:c @0 REAL_CST@1)
1734 (if (real_isnan (TREE_REAL_CST_PTR (@1))
1735 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
1736 @0)))
1737/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
1738 functions to return the numeric arg if the other one is NaN.
1739 MIN and MAX don't honor that, so only transform if -ffinite-math-only
1740 is set. C99 doesn't require -0.0 to be handled, so we don't have to
1741 worry about it either. */
1742(if (flag_finite_math_only)
1743 (simplify
1744 (FMIN @0 @1)
1745 (min @0 @1))
1746 (simplify
1747 (FMAX @0 @1)
1748 (max @0 @1)))
ce0e66ff
MG
1749/* min (-A, -B) -> -max (A, B) */
1750(for minmax (min max FMIN FMAX)
1751 maxmin (max min FMAX FMIN)
1752 (simplify
1753 (minmax (negate:s@2 @0) (negate:s@3 @1))
1754 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
1755 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1756 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
1757 (negate (maxmin @0 @1)))))
1758/* MIN (~X, ~Y) -> ~MAX (X, Y)
1759 MAX (~X, ~Y) -> ~MIN (X, Y) */
1760(for minmax (min max)
1761 maxmin (max min)
1762 (simplify
1763 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
1764 (bit_not (maxmin @0 @1))))
a7f24614 1765
b4817bd6
MG
1766/* MIN (X, Y) == X -> X <= Y */
1767(for minmax (min min max max)
1768 cmp (eq ne eq ne )
1769 out (le gt ge lt )
1770 (simplify
1771 (cmp:c (minmax:c @0 @1) @0)
1772 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
1773 (out @0 @1))))
1774/* MIN (X, 5) == 0 -> X == 0
1775 MIN (X, 5) == 7 -> false */
1776(for cmp (eq ne)
1777 (simplify
1778 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
1779 (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1780 { constant_boolean_node (cmp == NE_EXPR, type); }
1781 (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1782 (cmp @0 @2)))))
1783(for cmp (eq ne)
1784 (simplify
1785 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
1786 (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1787 { constant_boolean_node (cmp == NE_EXPR, type); }
1788 (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1789 (cmp @0 @2)))))
1790/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
1791(for minmax (min min max max min min max max )
1792 cmp (lt le gt ge gt ge lt le )
1793 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
1794 (simplify
1795 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
1796 (comb (cmp @0 @2) (cmp @1 @2))))
1797
a7f24614
RB
1798/* Simplifications of shift and rotates. */
1799
1800(for rotate (lrotate rrotate)
1801 (simplify
1802 (rotate integer_all_onesp@0 @1)
1803 @0))
1804
1805/* Optimize -1 >> x for arithmetic right shifts. */
1806(simplify
1807 (rshift integer_all_onesp@0 @1)
1808 (if (!TYPE_UNSIGNED (type)
1809 && tree_expr_nonnegative_p (@1))
1810 @0))
1811
12085390
N
1812/* Optimize (x >> c) << c into x & (-1<<c). */
1813(simplify
1814 (lshift (rshift @0 INTEGER_CST@1) @1)
1815 (if (wi::ltu_p (@1, element_precision (type)))
1816 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
1817
1818/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
1819 types. */
1820(simplify
1821 (rshift (lshift @0 INTEGER_CST@1) @1)
1822 (if (TYPE_UNSIGNED (type)
1823 && (wi::ltu_p (@1, element_precision (type))))
1824 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
1825
a7f24614
RB
1826(for shiftrotate (lrotate rrotate lshift rshift)
1827 (simplify
1828 (shiftrotate @0 integer_zerop)
1829 (non_lvalue @0))
1830 (simplify
1831 (shiftrotate integer_zerop@0 @1)
1832 @0)
1833 /* Prefer vector1 << scalar to vector1 << vector2
1834 if vector2 is uniform. */
1835 (for vec (VECTOR_CST CONSTRUCTOR)
1836 (simplify
1837 (shiftrotate @0 vec@1)
1838 (with { tree tem = uniform_vector_p (@1); }
1839 (if (tem)
1840 (shiftrotate @0 { tem; }))))))
1841
165ba2e9
JJ
1842/* Simplify X << Y where Y's low width bits are 0 to X, as only valid
1843 Y is 0. Similarly for X >> Y. */
1844#if GIMPLE
1845(for shift (lshift rshift)
1846 (simplify
1847 (shift @0 SSA_NAME@1)
1848 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
1849 (with {
1850 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
1851 int prec = TYPE_PRECISION (TREE_TYPE (@1));
1852 }
1853 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
1854 @0)))))
1855#endif
1856
a7f24614
RB
1857/* Rewrite an LROTATE_EXPR by a constant into an
1858 RROTATE_EXPR by a new constant. */
1859(simplify
1860 (lrotate @0 INTEGER_CST@1)
23f27839 1861 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
a7f24614
RB
1862 build_int_cst (TREE_TYPE (@1),
1863 element_precision (type)), @1); }))
1864
14ea9f92
RB
1865/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
1866(for op (lrotate rrotate rshift lshift)
1867 (simplify
1868 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
1869 (with { unsigned int prec = element_precision (type); }
1870 (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
1871 && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
1872 && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
1873 && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
1874 (with { unsigned int low = wi::add (@1, @2).to_uhwi (); }
1875 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
1876 being well defined. */
1877 (if (low >= prec)
1878 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
8fdc6c67 1879 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
50301115 1880 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
8fdc6c67
RB
1881 { build_zero_cst (type); }
1882 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
1883 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
14ea9f92
RB
1884
1885
01ada710
MP
1886/* ((1 << A) & 1) != 0 -> A == 0
1887 ((1 << A) & 1) == 0 -> A != 0 */
1888(for cmp (ne eq)
1889 icmp (eq ne)
1890 (simplify
1891 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
1892 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
cc7b5acf 1893
f2e609c3
MP
1894/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
1895 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
1896 if CST2 != 0. */
1897(for cmp (ne eq)
1898 (simplify
1899 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
1900 (with { int cand = wi::ctz (@2) - wi::ctz (@0); }
1901 (if (cand < 0
1902 || (!integer_zerop (@2)
1903 && wi::ne_p (wi::lshift (@0, cand), @2)))
8fdc6c67
RB
1904 { constant_boolean_node (cmp == NE_EXPR, type); }
1905 (if (!integer_zerop (@2)
1906 && wi::eq_p (wi::lshift (@0, cand), @2))
1907 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
f2e609c3 1908
1ffbaa3f
RB
1909/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
1910 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
1911 if the new mask might be further optimized. */
1912(for shift (lshift rshift)
1913 (simplify
44fc0a51
RB
1914 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
1915 INTEGER_CST@2)
1ffbaa3f
RB
1916 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
1917 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
1918 && tree_fits_uhwi_p (@1)
1919 && tree_to_uhwi (@1) > 0
1920 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
1921 (with
1922 {
1923 unsigned int shiftc = tree_to_uhwi (@1);
1924 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
1925 unsigned HOST_WIDE_INT newmask, zerobits = 0;
1926 tree shift_type = TREE_TYPE (@3);
1927 unsigned int prec;
1928
1929 if (shift == LSHIFT_EXPR)
fecfbfa4 1930 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
1ffbaa3f 1931 else if (shift == RSHIFT_EXPR
2be65d9e 1932 && type_has_mode_precision_p (shift_type))
1ffbaa3f
RB
1933 {
1934 prec = TYPE_PRECISION (TREE_TYPE (@3));
1935 tree arg00 = @0;
1936 /* See if more bits can be proven as zero because of
1937 zero extension. */
1938 if (@3 != @0
1939 && TYPE_UNSIGNED (TREE_TYPE (@0)))
1940 {
1941 tree inner_type = TREE_TYPE (@0);
2be65d9e 1942 if (type_has_mode_precision_p (inner_type)
1ffbaa3f
RB
1943 && TYPE_PRECISION (inner_type) < prec)
1944 {
1945 prec = TYPE_PRECISION (inner_type);
1946 /* See if we can shorten the right shift. */
1947 if (shiftc < prec)
1948 shift_type = inner_type;
1949 /* Otherwise X >> C1 is all zeros, so we'll optimize
1950 it into (X, 0) later on by making sure zerobits
1951 is all ones. */
1952 }
1953 }
dd4786fe 1954 zerobits = HOST_WIDE_INT_M1U;
1ffbaa3f
RB
1955 if (shiftc < prec)
1956 {
1957 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
1958 zerobits <<= prec - shiftc;
1959 }
1960 /* For arithmetic shift if sign bit could be set, zerobits
1961 can contain actually sign bits, so no transformation is
1962 possible, unless MASK masks them all away. In that
1963 case the shift needs to be converted into logical shift. */
1964 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
1965 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
1966 {
1967 if ((mask & zerobits) == 0)
1968 shift_type = unsigned_type_for (TREE_TYPE (@3));
1969 else
1970 zerobits = 0;
1971 }
1972 }
1973 }
1974 /* ((X << 16) & 0xff00) is (X, 0). */
1975 (if ((mask & zerobits) == mask)
8fdc6c67
RB
1976 { build_int_cst (type, 0); }
1977 (with { newmask = mask | zerobits; }
1978 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
1979 (with
1980 {
1981 /* Only do the transformation if NEWMASK is some integer
1982 mode's mask. */
1983 for (prec = BITS_PER_UNIT;
1984 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
fecfbfa4 1985 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
8fdc6c67
RB
1986 break;
1987 }
1988 (if (prec < HOST_BITS_PER_WIDE_INT
dd4786fe 1989 || newmask == HOST_WIDE_INT_M1U)
8fdc6c67
RB
1990 (with
1991 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
1992 (if (!tree_int_cst_equal (newmaskt, @2))
1993 (if (shift_type != TREE_TYPE (@3))
1994 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
1995 (bit_and @4 { newmaskt; })))))))))))))
1ffbaa3f 1996
84ff66b8
AV
1997/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
1998 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
98e30e51 1999(for shift (lshift rshift)
84ff66b8
AV
2000 (for bit_op (bit_and bit_xor bit_ior)
2001 (simplify
2002 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2003 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2004 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2005 (bit_op (shift (convert @0) @1) { mask; }))))))
98e30e51 2006
ad1d92ab
MM
2007/* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2008(simplify
2009 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2010 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
ece46666
MG
2011 && (element_precision (TREE_TYPE (@0))
2012 <= element_precision (TREE_TYPE (@1))
2013 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
ad1d92ab
MM
2014 (with
2015 { tree shift_type = TREE_TYPE (@0); }
2016 (convert (rshift (convert:shift_type @1) @2)))))
2017
2018/* ~(~X >>r Y) -> X >>r Y
2019 ~(~X <<r Y) -> X <<r Y */
2020(for rotate (lrotate rrotate)
2021 (simplify
2022 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
ece46666
MG
2023 (if ((element_precision (TREE_TYPE (@0))
2024 <= element_precision (TREE_TYPE (@1))
2025 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2026 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2027 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
ad1d92ab
MM
2028 (with
2029 { tree rotate_type = TREE_TYPE (@0); }
2030 (convert (rotate (convert:rotate_type @1) @2))))))
98e30e51 2031
d4573ffe
RB
2032/* Simplifications of conversions. */
2033
2034/* Basic strip-useless-type-conversions / strip_nops. */
f3582e54 2035(for cvt (convert view_convert float fix_trunc)
d4573ffe
RB
2036 (simplify
2037 (cvt @0)
2038 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2039 || (GENERIC && type == TREE_TYPE (@0)))
2040 @0)))
2041
2042/* Contract view-conversions. */
2043(simplify
2044 (view_convert (view_convert @0))
2045 (view_convert @0))
2046
2047/* For integral conversions with the same precision or pointer
2048 conversions use a NOP_EXPR instead. */
2049(simplify
2050 (view_convert @0)
2051 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2052 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2053 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2054 (convert @0)))
2055
bce8ef71
MG
2056/* Strip inner integral conversions that do not change precision or size, or
2057 zero-extend while keeping the same size (for bool-to-char). */
d4573ffe
RB
2058(simplify
2059 (view_convert (convert@0 @1))
2060 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2061 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
bce8ef71
MG
2062 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2063 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2064 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2065 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
d4573ffe
RB
2066 (view_convert @1)))
2067
2068/* Re-association barriers around constants and other re-association
2069 barriers can be removed. */
2070(simplify
2071 (paren CONSTANT_CLASS_P@0)
2072 @0)
2073(simplify
2074 (paren (paren@1 @0))
2075 @1)
1e51d0a2
RB
2076
2077/* Handle cases of two conversions in a row. */
2078(for ocvt (convert float fix_trunc)
2079 (for icvt (convert float)
2080 (simplify
2081 (ocvt (icvt@1 @0))
2082 (with
2083 {
2084 tree inside_type = TREE_TYPE (@0);
2085 tree inter_type = TREE_TYPE (@1);
2086 int inside_int = INTEGRAL_TYPE_P (inside_type);
2087 int inside_ptr = POINTER_TYPE_P (inside_type);
2088 int inside_float = FLOAT_TYPE_P (inside_type);
09240451 2089 int inside_vec = VECTOR_TYPE_P (inside_type);
1e51d0a2
RB
2090 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2091 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2092 int inter_int = INTEGRAL_TYPE_P (inter_type);
2093 int inter_ptr = POINTER_TYPE_P (inter_type);
2094 int inter_float = FLOAT_TYPE_P (inter_type);
09240451 2095 int inter_vec = VECTOR_TYPE_P (inter_type);
1e51d0a2
RB
2096 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2097 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2098 int final_int = INTEGRAL_TYPE_P (type);
2099 int final_ptr = POINTER_TYPE_P (type);
2100 int final_float = FLOAT_TYPE_P (type);
09240451 2101 int final_vec = VECTOR_TYPE_P (type);
1e51d0a2
RB
2102 unsigned int final_prec = TYPE_PRECISION (type);
2103 int final_unsignedp = TYPE_UNSIGNED (type);
2104 }
64d3a1f0
RB
2105 (switch
2106 /* In addition to the cases of two conversions in a row
2107 handled below, if we are converting something to its own
2108 type via an object of identical or wider precision, neither
2109 conversion is needed. */
2110 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2111 || (GENERIC
2112 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2113 && (((inter_int || inter_ptr) && final_int)
2114 || (inter_float && final_float))
2115 && inter_prec >= final_prec)
2116 (ocvt @0))
2117
2118 /* Likewise, if the intermediate and initial types are either both
2119 float or both integer, we don't need the middle conversion if the
2120 former is wider than the latter and doesn't change the signedness
2121 (for integers). Avoid this if the final type is a pointer since
36088299 2122 then we sometimes need the middle conversion. */
64d3a1f0
RB
2123 (if (((inter_int && inside_int) || (inter_float && inside_float))
2124 && (final_int || final_float)
2125 && inter_prec >= inside_prec
36088299 2126 && (inter_float || inter_unsignedp == inside_unsignedp))
64d3a1f0
RB
2127 (ocvt @0))
2128
2129 /* If we have a sign-extension of a zero-extended value, we can
2130 replace that by a single zero-extension. Likewise if the
2131 final conversion does not change precision we can drop the
2132 intermediate conversion. */
2133 (if (inside_int && inter_int && final_int
2134 && ((inside_prec < inter_prec && inter_prec < final_prec
2135 && inside_unsignedp && !inter_unsignedp)
2136 || final_prec == inter_prec))
2137 (ocvt @0))
2138
2139 /* Two conversions in a row are not needed unless:
1e51d0a2
RB
2140 - some conversion is floating-point (overstrict for now), or
2141 - some conversion is a vector (overstrict for now), or
2142 - the intermediate type is narrower than both initial and
2143 final, or
2144 - the intermediate type and innermost type differ in signedness,
2145 and the outermost type is wider than the intermediate, or
2146 - the initial type is a pointer type and the precisions of the
2147 intermediate and final types differ, or
2148 - the final type is a pointer type and the precisions of the
2149 initial and intermediate types differ. */
64d3a1f0
RB
2150 (if (! inside_float && ! inter_float && ! final_float
2151 && ! inside_vec && ! inter_vec && ! final_vec
2152 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2153 && ! (inside_int && inter_int
2154 && inter_unsignedp != inside_unsignedp
2155 && inter_prec < final_prec)
2156 && ((inter_unsignedp && inter_prec > inside_prec)
2157 == (final_unsignedp && final_prec > inter_prec))
2158 && ! (inside_ptr && inter_prec != final_prec)
36088299 2159 && ! (final_ptr && inside_prec != inter_prec))
64d3a1f0
RB
2160 (ocvt @0))
2161
2162 /* A truncation to an unsigned type (a zero-extension) should be
2163 canonicalized as bitwise and of a mask. */
1d510e04
JJ
2164 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2165 && final_int && inter_int && inside_int
64d3a1f0
RB
2166 && final_prec == inside_prec
2167 && final_prec > inter_prec
2168 && inter_unsignedp)
2169 (convert (bit_and @0 { wide_int_to_tree
2170 (inside_type,
2171 wi::mask (inter_prec, false,
2172 TYPE_PRECISION (inside_type))); })))
2173
2174 /* If we are converting an integer to a floating-point that can
2175 represent it exactly and back to an integer, we can skip the
2176 floating-point conversion. */
2177 (if (GIMPLE /* PR66211 */
2178 && inside_int && inter_float && final_int &&
2179 (unsigned) significand_size (TYPE_MODE (inter_type))
2180 >= inside_prec - !inside_unsignedp)
2181 (convert @0)))))))
ea2042ba
RB
2182
2183/* If we have a narrowing conversion to an integral type that is fed by a
2184 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2185 masks off bits outside the final type (and nothing else). */
2186(simplify
2187 (convert (bit_and @0 INTEGER_CST@1))
2188 (if (INTEGRAL_TYPE_P (type)
2189 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2190 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2191 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2192 TYPE_PRECISION (type)), 0))
2193 (convert @0)))
a25454ea
RB
2194
2195
2196/* (X /[ex] A) * A -> X. */
2197(simplify
2eef1fc1
RB
2198 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2199 (convert @0))
eaeba53a 2200
a7f24614
RB
2201/* Canonicalization of binary operations. */
2202
2203/* Convert X + -C into X - C. */
2204(simplify
2205 (plus @0 REAL_CST@1)
2206 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
23f27839 2207 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
a7f24614
RB
2208 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2209 (minus @0 { tem; })))))
2210
6b6aa8d3 2211/* Convert x+x into x*2. */
a7f24614
RB
2212(simplify
2213 (plus @0 @0)
2214 (if (SCALAR_FLOAT_TYPE_P (type))
6b6aa8d3
MG
2215 (mult @0 { build_real (type, dconst2); })
2216 (if (INTEGRAL_TYPE_P (type))
2217 (mult @0 { build_int_cst (type, 2); }))))
a7f24614
RB
2218
2219(simplify
2220 (minus integer_zerop @1)
2221 (negate @1))
2222
2223/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2224 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2225 (-ARG1 + ARG0) reduces to -ARG1. */
2226(simplify
2227 (minus real_zerop@0 @1)
2228 (if (fold_real_zero_addition_p (type, @0, 0))
2229 (negate @1)))
2230
2231/* Transform x * -1 into -x. */
2232(simplify
2233 (mult @0 integer_minus_onep)
2234 (negate @0))
eaeba53a 2235
b771c609
AM
2236/* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2237 signed overflow for CST != 0 && CST != -1. */
2238(simplify
2239 (mult:c (mult:s @0 INTEGER_CST@1) @2)
2240 (if (TREE_CODE (@2) != INTEGER_CST
2241 && !integer_zerop (@1) && !integer_minus_onep (@1))
2242 (mult (mult @0 @2) @1)))
2243
96285749
RS
2244/* True if we can easily extract the real and imaginary parts of a complex
2245 number. */
2246(match compositional_complex
2247 (convert? (complex @0 @1)))
2248
eaeba53a
RB
2249/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2250(simplify
2251 (complex (realpart @0) (imagpart @0))
2252 @0)
2253(simplify
2254 (realpart (complex @0 @1))
2255 @0)
2256(simplify
2257 (imagpart (complex @0 @1))
2258 @1)
83633539 2259
77c028c5
MG
2260/* Sometimes we only care about half of a complex expression. */
2261(simplify
2262 (realpart (convert?:s (conj:s @0)))
2263 (convert (realpart @0)))
2264(simplify
2265 (imagpart (convert?:s (conj:s @0)))
2266 (convert (negate (imagpart @0))))
2267(for part (realpart imagpart)
2268 (for op (plus minus)
2269 (simplify
2270 (part (convert?:s@2 (op:s @0 @1)))
2271 (convert (op (part @0) (part @1))))))
2272(simplify
2273 (realpart (convert?:s (CEXPI:s @0)))
2274 (convert (COS @0)))
2275(simplify
2276 (imagpart (convert?:s (CEXPI:s @0)))
2277 (convert (SIN @0)))
2278
2279/* conj(conj(x)) -> x */
2280(simplify
2281 (conj (convert? (conj @0)))
2282 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2283 (convert @0)))
2284
2285/* conj({x,y}) -> {x,-y} */
2286(simplify
2287 (conj (convert?:s (complex:s @0 @1)))
2288 (with { tree itype = TREE_TYPE (type); }
2289 (complex (convert:itype @0) (negate (convert:itype @1)))))
83633539
RB
2290
2291/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2292(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2293 (simplify
2294 (bswap (bswap @0))
2295 @0)
2296 (simplify
2297 (bswap (bit_not (bswap @0)))
2298 (bit_not @0))
2299 (for bitop (bit_xor bit_ior bit_and)
2300 (simplify
2301 (bswap (bitop:c (bswap @0) @1))
2302 (bitop @0 (bswap @1)))))
96994de0
RB
2303
2304
2305/* Combine COND_EXPRs and VEC_COND_EXPRs. */
2306
2307/* Simplify constant conditions.
2308 Only optimize constant conditions when the selected branch
2309 has the same type as the COND_EXPR. This avoids optimizing
2310 away "c ? x : throw", where the throw has a void type.
2311 Note that we cannot throw away the fold-const.c variant nor
2312 this one as we depend on doing this transform before possibly
2313 A ? B : B -> B triggers and the fold-const.c one can optimize
2314 0 ? A : B to B even if A has side-effects. Something
2315 genmatch cannot handle. */
2316(simplify
2317 (cond INTEGER_CST@0 @1 @2)
8fdc6c67
RB
2318 (if (integer_zerop (@0))
2319 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2320 @2)
2321 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2322 @1)))
96994de0
RB
2323(simplify
2324 (vec_cond VECTOR_CST@0 @1 @2)
2325 (if (integer_all_onesp (@0))
8fdc6c67
RB
2326 @1
2327 (if (integer_zerop (@0))
2328 @2)))
96994de0 2329
b5481987
BC
2330/* Simplification moved from fold_cond_expr_with_comparison. It may also
2331 be extended. */
e2535011
BC
2332/* This pattern implements two kinds simplification:
2333
2334 Case 1)
2335 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
b5481987
BC
2336 1) Conversions are type widening from smaller type.
2337 2) Const c1 equals to c2 after canonicalizing comparison.
2338 3) Comparison has tree code LT, LE, GT or GE.
2339 This specific pattern is needed when (cmp (convert x) c) may not
2340 be simplified by comparison patterns because of multiple uses of
2341 x. It also makes sense here because simplifying across multiple
e2535011
BC
2342 referred var is always benefitial for complicated cases.
2343
2344 Case 2)
2345 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2346(for cmp (lt le gt ge eq)
b5481987 2347 (simplify
ae22bc5d 2348 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
b5481987
BC
2349 (with
2350 {
2351 tree from_type = TREE_TYPE (@1);
2352 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
ae22bc5d 2353 enum tree_code code = ERROR_MARK;
b5481987 2354
ae22bc5d
BC
2355 if (INTEGRAL_TYPE_P (from_type)
2356 && int_fits_type_p (@2, from_type)
b5481987
BC
2357 && (types_match (c1_type, from_type)
2358 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2359 && (TYPE_UNSIGNED (from_type)
2360 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2361 && (types_match (c2_type, from_type)
2362 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2363 && (TYPE_UNSIGNED (from_type)
2364 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2365 {
ae22bc5d 2366 if (cmp != EQ_EXPR)
b5481987 2367 {
e2535011
BC
2368 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2369 {
2370 /* X <= Y - 1 equals to X < Y. */
ae22bc5d 2371 if (cmp == LE_EXPR)
e2535011
BC
2372 code = LT_EXPR;
2373 /* X > Y - 1 equals to X >= Y. */
ae22bc5d 2374 if (cmp == GT_EXPR)
e2535011
BC
2375 code = GE_EXPR;
2376 }
2377 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2378 {
2379 /* X < Y + 1 equals to X <= Y. */
ae22bc5d 2380 if (cmp == LT_EXPR)
e2535011
BC
2381 code = LE_EXPR;
2382 /* X >= Y + 1 equals to X > Y. */
ae22bc5d 2383 if (cmp == GE_EXPR)
e2535011
BC
2384 code = GT_EXPR;
2385 }
ae22bc5d
BC
2386 if (code != ERROR_MARK
2387 || wi::to_widest (@2) == wi::to_widest (@3))
e2535011 2388 {
ae22bc5d 2389 if (cmp == LT_EXPR || cmp == LE_EXPR)
e2535011 2390 code = MIN_EXPR;
ae22bc5d 2391 if (cmp == GT_EXPR || cmp == GE_EXPR)
e2535011
BC
2392 code = MAX_EXPR;
2393 }
b5481987 2394 }
e2535011 2395 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
ae22bc5d
BC
2396 else if (int_fits_type_p (@3, from_type))
2397 code = EQ_EXPR;
b5481987
BC
2398 }
2399 }
2400 (if (code == MAX_EXPR)
21aaaf1e 2401 (convert (max @1 (convert @2)))
b5481987 2402 (if (code == MIN_EXPR)
21aaaf1e 2403 (convert (min @1 (convert @2)))
e2535011 2404 (if (code == EQ_EXPR)
ae22bc5d 2405 (convert (cond (eq @1 (convert @3))
21aaaf1e 2406 (convert:from_type @3) (convert:from_type @2)))))))))
b5481987 2407
714445ae
BC
2408/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2409
2410 1) OP is PLUS or MINUS.
2411 2) CMP is LT, LE, GT or GE.
2412 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2413
2414 This pattern also handles special cases like:
2415
2416 A) Operand x is a unsigned to signed type conversion and c1 is
2417 integer zero. In this case,
2418 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2419 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2420 B) Const c1 may not equal to (C3 op' C2). In this case we also
2421 check equality for (c1+1) and (c1-1) by adjusting comparison
2422 code.
2423
2424 TODO: Though signed type is handled by this pattern, it cannot be
2425 simplified at the moment because C standard requires additional
2426 type promotion. In order to match&simplify it here, the IR needs
2427 to be cleaned up by other optimizers, i.e, VRP. */
2428(for op (plus minus)
2429 (for cmp (lt le gt ge)
2430 (simplify
2431 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2432 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2433 (if (types_match (from_type, to_type)
2434 /* Check if it is special case A). */
2435 || (TYPE_UNSIGNED (from_type)
2436 && !TYPE_UNSIGNED (to_type)
2437 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2438 && integer_zerop (@1)
2439 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2440 (with
2441 {
2442 bool overflow = false;
2443 enum tree_code code, cmp_code = cmp;
2444 wide_int real_c1, c1 = @1, c2 = @2, c3 = @3;
2445 signop sgn = TYPE_SIGN (from_type);
2446
2447 /* Handle special case A), given x of unsigned type:
2448 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2449 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2450 if (!types_match (from_type, to_type))
2451 {
2452 if (cmp_code == LT_EXPR)
2453 cmp_code = GT_EXPR;
2454 if (cmp_code == GE_EXPR)
2455 cmp_code = LE_EXPR;
2456 c1 = wi::max_value (to_type);
2457 }
2458 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2459 compute (c3 op' c2) and check if it equals to c1 with op' being
2460 the inverted operator of op. Make sure overflow doesn't happen
2461 if it is undefined. */
2462 if (op == PLUS_EXPR)
2463 real_c1 = wi::sub (c3, c2, sgn, &overflow);
2464 else
2465 real_c1 = wi::add (c3, c2, sgn, &overflow);
2466
2467 code = cmp_code;
2468 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
2469 {
2470 /* Check if c1 equals to real_c1. Boundary condition is handled
2471 by adjusting comparison operation if necessary. */
2472 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
2473 && !overflow)
2474 {
2475 /* X <= Y - 1 equals to X < Y. */
2476 if (cmp_code == LE_EXPR)
2477 code = LT_EXPR;
2478 /* X > Y - 1 equals to X >= Y. */
2479 if (cmp_code == GT_EXPR)
2480 code = GE_EXPR;
2481 }
2482 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
2483 && !overflow)
2484 {
2485 /* X < Y + 1 equals to X <= Y. */
2486 if (cmp_code == LT_EXPR)
2487 code = LE_EXPR;
2488 /* X >= Y + 1 equals to X > Y. */
2489 if (cmp_code == GE_EXPR)
2490 code = GT_EXPR;
2491 }
2492 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
2493 {
2494 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
2495 code = MIN_EXPR;
2496 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
2497 code = MAX_EXPR;
2498 }
2499 }
2500 }
2501 (if (code == MAX_EXPR)
2502 (op (max @X { wide_int_to_tree (from_type, real_c1); })
2503 { wide_int_to_tree (from_type, c2); })
2504 (if (code == MIN_EXPR)
2505 (op (min @X { wide_int_to_tree (from_type, real_c1); })
2506 { wide_int_to_tree (from_type, c2); })))))))))
2507
96994de0
RB
2508(for cnd (cond vec_cond)
2509 /* A ? B : (A ? X : C) -> A ? B : C. */
2510 (simplify
2511 (cnd @0 (cnd @0 @1 @2) @3)
2512 (cnd @0 @1 @3))
2513 (simplify
2514 (cnd @0 @1 (cnd @0 @2 @3))
2515 (cnd @0 @1 @3))
24a179f8
RB
2516 /* A ? B : (!A ? C : X) -> A ? B : C. */
2517 /* ??? This matches embedded conditions open-coded because genmatch
2518 would generate matching code for conditions in separate stmts only.
2519 The following is still important to merge then and else arm cases
2520 from if-conversion. */
2521 (simplify
2522 (cnd @0 @1 (cnd @2 @3 @4))
2523 (if (COMPARISON_CLASS_P (@0)
2524 && COMPARISON_CLASS_P (@2)
2525 && invert_tree_comparison
2526 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
2527 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
2528 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
2529 (cnd @0 @1 @3)))
2530 (simplify
2531 (cnd @0 (cnd @1 @2 @3) @4)
2532 (if (COMPARISON_CLASS_P (@0)
2533 && COMPARISON_CLASS_P (@1)
2534 && invert_tree_comparison
2535 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
2536 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
2537 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
2538 (cnd @0 @3 @4)))
96994de0
RB
2539
2540 /* A ? B : B -> B. */
2541 (simplify
2542 (cnd @0 @1 @1)
09240451 2543 @1)
96994de0 2544
09240451
MG
2545 /* !A ? B : C -> A ? C : B. */
2546 (simplify
2547 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
2548 (cnd @0 @2 @1)))
f84e7fd6 2549
a3ca1bc5
RB
2550/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
2551 return all -1 or all 0 results. */
f43d102e
RS
2552/* ??? We could instead convert all instances of the vec_cond to negate,
2553 but that isn't necessarily a win on its own. */
2554(simplify
a3ca1bc5 2555 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 2556 (if (VECTOR_TYPE_P (type)
4d8989d5 2557 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
f43d102e 2558 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 2559 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 2560 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f43d102e 2561
a3ca1bc5 2562/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
f43d102e 2563(simplify
a3ca1bc5 2564 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 2565 (if (VECTOR_TYPE_P (type)
4d8989d5 2566 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
f43d102e 2567 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 2568 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 2569 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f84e7fd6 2570
2ee05f1e 2571
f84e7fd6
RB
2572/* Simplifications of comparisons. */
2573
24f1db9c
RB
2574/* See if we can reduce the magnitude of a constant involved in a
2575 comparison by changing the comparison code. This is a canonicalization
2576 formerly done by maybe_canonicalize_comparison_1. */
2577(for cmp (le gt)
2578 acmp (lt ge)
2579 (simplify
2580 (cmp @0 INTEGER_CST@1)
2581 (if (tree_int_cst_sgn (@1) == -1)
2582 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
2583(for cmp (ge lt)
2584 acmp (gt le)
2585 (simplify
2586 (cmp @0 INTEGER_CST@1)
2587 (if (tree_int_cst_sgn (@1) == 1)
2588 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
2589
2590
f84e7fd6
RB
2591/* We can simplify a logical negation of a comparison to the
2592 inverted comparison. As we cannot compute an expression
2593 operator using invert_tree_comparison we have to simulate
2594 that with expression code iteration. */
2595(for cmp (tcc_comparison)
2596 icmp (inverted_tcc_comparison)
2597 ncmp (inverted_tcc_comparison_with_nans)
2598 /* Ideally we'd like to combine the following two patterns
2599 and handle some more cases by using
2600 (logical_inverted_value (cmp @0 @1))
2601 here but for that genmatch would need to "inline" that.
2602 For now implement what forward_propagate_comparison did. */
2603 (simplify
2604 (bit_not (cmp @0 @1))
2605 (if (VECTOR_TYPE_P (type)
2606 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
2607 /* Comparison inversion may be impossible for trapping math,
2608 invert_tree_comparison will tell us. But we can't use
2609 a computed operator in the replacement tree thus we have
2610 to play the trick below. */
2611 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 2612 (cmp, HONOR_NANS (@0)); }
f84e7fd6 2613 (if (ic == icmp)
8fdc6c67
RB
2614 (icmp @0 @1)
2615 (if (ic == ncmp)
2616 (ncmp @0 @1))))))
f84e7fd6 2617 (simplify
09240451
MG
2618 (bit_xor (cmp @0 @1) integer_truep)
2619 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 2620 (cmp, HONOR_NANS (@0)); }
09240451 2621 (if (ic == icmp)
8fdc6c67
RB
2622 (icmp @0 @1)
2623 (if (ic == ncmp)
2624 (ncmp @0 @1))))))
e18c1d66 2625
2ee05f1e
RB
2626/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
2627 ??? The transformation is valid for the other operators if overflow
2628 is undefined for the type, but performing it here badly interacts
2629 with the transformation in fold_cond_expr_with_comparison which
2630 attempts to synthetize ABS_EXPR. */
2631(for cmp (eq ne)
2632 (simplify
d9ba1961
RB
2633 (cmp (minus@2 @0 @1) integer_zerop)
2634 (if (single_use (@2))
2635 (cmp @0 @1))))
2ee05f1e
RB
2636
2637/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
2638 signed arithmetic case. That form is created by the compiler
2639 often enough for folding it to be of value. One example is in
2640 computing loop trip counts after Operator Strength Reduction. */
07cdc2b8
RB
2641(for cmp (simple_comparison)
2642 scmp (swapped_simple_comparison)
2ee05f1e 2643 (simplify
bc6e9db4 2644 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2ee05f1e
RB
2645 /* Handle unfolded multiplication by zero. */
2646 (if (integer_zerop (@1))
8fdc6c67
RB
2647 (cmp @1 @2)
2648 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
bc6e9db4
RB
2649 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2650 && single_use (@3))
8fdc6c67
RB
2651 /* If @1 is negative we swap the sense of the comparison. */
2652 (if (tree_int_cst_sgn (@1) < 0)
2653 (scmp @0 @2)
2654 (cmp @0 @2))))))
2ee05f1e
RB
2655
2656/* Simplify comparison of something with itself. For IEEE
2657 floating-point, we can only do some of these simplifications. */
287f8f17 2658(for cmp (eq ge le)
2ee05f1e
RB
2659 (simplify
2660 (cmp @0 @0)
287f8f17 2661 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 2662 || ! HONOR_NANS (@0))
287f8f17
RB
2663 { constant_boolean_node (true, type); }
2664 (if (cmp != EQ_EXPR)
2665 (eq @0 @0)))))
2ee05f1e
RB
2666(for cmp (ne gt lt)
2667 (simplify
2668 (cmp @0 @0)
2669 (if (cmp != NE_EXPR
2670 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 2671 || ! HONOR_NANS (@0))
2ee05f1e 2672 { constant_boolean_node (false, type); })))
b5d3d787
RB
2673(for cmp (unle unge uneq)
2674 (simplify
2675 (cmp @0 @0)
2676 { constant_boolean_node (true, type); }))
dd53d197
MG
2677(for cmp (unlt ungt)
2678 (simplify
2679 (cmp @0 @0)
2680 (unordered @0 @0)))
b5d3d787
RB
2681(simplify
2682 (ltgt @0 @0)
2683 (if (!flag_trapping_math)
2684 { constant_boolean_node (false, type); }))
2ee05f1e
RB
2685
2686/* Fold ~X op ~Y as Y op X. */
07cdc2b8 2687(for cmp (simple_comparison)
2ee05f1e 2688 (simplify
7fe996ba
RB
2689 (cmp (bit_not@2 @0) (bit_not@3 @1))
2690 (if (single_use (@2) && single_use (@3))
2691 (cmp @1 @0))))
2ee05f1e
RB
2692
2693/* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
07cdc2b8
RB
2694(for cmp (simple_comparison)
2695 scmp (swapped_simple_comparison)
2ee05f1e 2696 (simplify
7fe996ba
RB
2697 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
2698 (if (single_use (@2)
2699 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2ee05f1e
RB
2700 (scmp @0 (bit_not @1)))))
2701
07cdc2b8
RB
2702(for cmp (simple_comparison)
2703 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
2704 (simplify
2705 (cmp (convert@2 @0) (convert? @1))
2706 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2707 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2708 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
2709 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2710 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
2711 (with
2712 {
2713 tree type1 = TREE_TYPE (@1);
2714 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
2715 {
2716 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
2717 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
2718 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
2719 type1 = float_type_node;
2720 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
2721 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
2722 type1 = double_type_node;
2723 }
2724 tree newtype
2725 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
2726 ? TREE_TYPE (@0) : type1);
2727 }
2728 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
2729 (cmp (convert:newtype @0) (convert:newtype @1))))))
2730
2731 (simplify
2732 (cmp @0 REAL_CST@1)
2733 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
64d3a1f0
RB
2734 (switch
2735 /* a CMP (-0) -> a CMP 0 */
2736 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
2737 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
2738 /* x != NaN is always true, other ops are always false. */
2739 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2740 && ! HONOR_SNANS (@1))
2741 { constant_boolean_node (cmp == NE_EXPR, type); })
2742 /* Fold comparisons against infinity. */
2743 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
2744 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
2745 (with
2746 {
2747 REAL_VALUE_TYPE max;
2748 enum tree_code code = cmp;
2749 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
2750 if (neg)
2751 code = swap_tree_comparison (code);
2752 }
2753 (switch
2754 /* x > +Inf is always false, if with ignore sNANs. */
2755 (if (code == GT_EXPR
2756 && ! HONOR_SNANS (@0))
2757 { constant_boolean_node (false, type); })
2758 (if (code == LE_EXPR)
2759 /* x <= +Inf is always true, if we don't case about NaNs. */
2760 (if (! HONOR_NANS (@0))
2761 { constant_boolean_node (true, type); }
b0eb889b 2762 /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
64d3a1f0
RB
2763 (eq @0 @0)))
2764 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
2765 (if (code == EQ_EXPR || code == GE_EXPR)
2766 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2767 (if (neg)
2768 (lt @0 { build_real (TREE_TYPE (@0), max); })
2769 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
2770 /* x < +Inf is always equal to x <= DBL_MAX. */
2771 (if (code == LT_EXPR)
2772 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2773 (if (neg)
2774 (ge @0 { build_real (TREE_TYPE (@0), max); })
2775 (le @0 { build_real (TREE_TYPE (@0), max); }))))
2776 /* x != +Inf is always equal to !(x > DBL_MAX). */
2777 (if (code == NE_EXPR)
2778 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2779 (if (! HONOR_NANS (@0))
2780 (if (neg)
2781 (ge @0 { build_real (TREE_TYPE (@0), max); })
2782 (le @0 { build_real (TREE_TYPE (@0), max); }))
2783 (if (neg)
2784 (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
2785 { build_one_cst (type); })
2786 (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
2787 { build_one_cst (type); }))))))))))
07cdc2b8
RB
2788
2789 /* If this is a comparison of a real constant with a PLUS_EXPR
2790 or a MINUS_EXPR of a real constant, we can convert it into a
2791 comparison with a revised real constant as long as no overflow
2792 occurs when unsafe_math_optimizations are enabled. */
2793 (if (flag_unsafe_math_optimizations)
2794 (for op (plus minus)
2795 (simplify
2796 (cmp (op @0 REAL_CST@1) REAL_CST@2)
2797 (with
2798 {
2799 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
2800 TREE_TYPE (@1), @2, @1);
2801 }
f980c9a2 2802 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
2803 (cmp @0 { tem; }))))))
2804
2805 /* Likewise, we can simplify a comparison of a real constant with
2806 a MINUS_EXPR whose first operand is also a real constant, i.e.
2807 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
2808 floating-point types only if -fassociative-math is set. */
2809 (if (flag_associative_math)
2810 (simplify
0409237b 2811 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
07cdc2b8 2812 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
f980c9a2 2813 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
2814 (cmp { tem; } @1)))))
2815
2816 /* Fold comparisons against built-in math functions. */
2817 (if (flag_unsafe_math_optimizations
2818 && ! flag_errno_math)
2819 (for sq (SQRT)
2820 (simplify
2821 (cmp (sq @0) REAL_CST@1)
64d3a1f0
RB
2822 (switch
2823 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2824 (switch
2825 /* sqrt(x) < y is always false, if y is negative. */
2826 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
8fdc6c67 2827 { constant_boolean_node (false, type); })
64d3a1f0
RB
2828 /* sqrt(x) > y is always true, if y is negative and we
2829 don't care about NaNs, i.e. negative values of x. */
2830 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
2831 { constant_boolean_node (true, type); })
2832 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
2833 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
c53233c6
RS
2834 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
2835 (switch
2836 /* sqrt(x) < 0 is always false. */
2837 (if (cmp == LT_EXPR)
2838 { constant_boolean_node (false, type); })
2839 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
2840 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
2841 { constant_boolean_node (true, type); })
2842 /* sqrt(x) <= 0 -> x == 0. */
2843 (if (cmp == LE_EXPR)
2844 (eq @0 @1))
2845 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
2846 == or !=. In the last case:
2847
2848 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
2849
2850 if x is negative or NaN. Due to -funsafe-math-optimizations,
2851 the results for other x follow from natural arithmetic. */
2852 (cmp @0 @1)))
64d3a1f0
RB
2853 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2854 (with
2855 {
2856 REAL_VALUE_TYPE c2;
5c88ea94
RS
2857 real_arithmetic (&c2, MULT_EXPR,
2858 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
2859 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2860 }
2861 (if (REAL_VALUE_ISINF (c2))
2862 /* sqrt(x) > y is x == +Inf, when y is very large. */
2863 (if (HONOR_INFINITIES (@0))
2864 (eq @0 { build_real (TREE_TYPE (@0), c2); })
2865 { constant_boolean_node (false, type); })
2866 /* sqrt(x) > c is the same as x > c*c. */
2867 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
2868 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2869 (with
2870 {
2871 REAL_VALUE_TYPE c2;
5c88ea94
RS
2872 real_arithmetic (&c2, MULT_EXPR,
2873 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
2874 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2875 }
2876 (if (REAL_VALUE_ISINF (c2))
2877 (switch
2878 /* sqrt(x) < y is always true, when y is a very large
2879 value and we don't care about NaNs or Infinities. */
2880 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
2881 { constant_boolean_node (true, type); })
2882 /* sqrt(x) < y is x != +Inf when y is very large and we
2883 don't care about NaNs. */
2884 (if (! HONOR_NANS (@0))
2885 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
2886 /* sqrt(x) < y is x >= 0 when y is very large and we
2887 don't care about Infinities. */
2888 (if (! HONOR_INFINITIES (@0))
2889 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
2890 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
2891 (if (GENERIC)
2892 (truth_andif
2893 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2894 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
2895 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
2896 (if (! HONOR_NANS (@0))
2897 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
2898 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
2899 (if (GENERIC)
2900 (truth_andif
2901 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
0ca2e7f7
PK
2902 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
2903 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
2904 (simplify
2905 (cmp (sq @0) (sq @1))
2906 (if (! HONOR_NANS (@0))
2907 (cmp @0 @1))))))
2ee05f1e 2908
c779bea5
YG
2909/* Optimize various special cases of (FTYPE) N CMP CST. */
2910(for cmp (lt le eq ne ge gt)
2911 icmp (le le eq ne ge ge)
2912 (simplify
2913 (cmp (float @0) REAL_CST@1)
2914 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
2915 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
2916 (with
2917 {
2918 tree itype = TREE_TYPE (@0);
2919 signop isign = TYPE_SIGN (itype);
2920 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
2921 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
2922 /* Be careful to preserve any potential exceptions due to
2923 NaNs. qNaNs are ok in == or != context.
2924 TODO: relax under -fno-trapping-math or
2925 -fno-signaling-nans. */
2926 bool exception_p
2927 = real_isnan (cst) && (cst->signalling
c651dca2 2928 || (cmp != EQ_EXPR && cmp != NE_EXPR));
c779bea5
YG
2929 /* INT?_MIN is power-of-two so it takes
2930 only one mantissa bit. */
2931 bool signed_p = isign == SIGNED;
2932 bool itype_fits_ftype_p
2933 = TYPE_PRECISION (itype) - signed_p <= significand_size (fmt);
2934 }
2935 /* TODO: allow non-fitting itype and SNaNs when
2936 -fno-trapping-math. */
2937 (if (itype_fits_ftype_p && ! exception_p)
2938 (with
2939 {
2940 REAL_VALUE_TYPE imin, imax;
2941 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
2942 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
2943
2944 REAL_VALUE_TYPE icst;
2945 if (cmp == GT_EXPR || cmp == GE_EXPR)
2946 real_ceil (&icst, fmt, cst);
2947 else if (cmp == LT_EXPR || cmp == LE_EXPR)
2948 real_floor (&icst, fmt, cst);
2949 else
2950 real_trunc (&icst, fmt, cst);
2951
2952 bool cst_int_p = real_identical (&icst, cst);
2953
2954 bool overflow_p = false;
2955 wide_int icst_val
2956 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
2957 }
2958 (switch
2959 /* Optimize cases when CST is outside of ITYPE's range. */
2960 (if (real_compare (LT_EXPR, cst, &imin))
2961 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
2962 type); })
2963 (if (real_compare (GT_EXPR, cst, &imax))
2964 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
2965 type); })
2966 /* Remove cast if CST is an integer representable by ITYPE. */
2967 (if (cst_int_p)
2968 (cmp @0 { gcc_assert (!overflow_p);
2969 wide_int_to_tree (itype, icst_val); })
2970 )
2971 /* When CST is fractional, optimize
2972 (FTYPE) N == CST -> 0
2973 (FTYPE) N != CST -> 1. */
2974 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
2975 { constant_boolean_node (cmp == NE_EXPR, type); })
2976 /* Otherwise replace with sensible integer constant. */
2977 (with
2978 {
2979 gcc_checking_assert (!overflow_p);
2980 }
2981 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
2982
40fd269a
MG
2983/* Fold A /[ex] B CMP C to A CMP B * C. */
2984(for cmp (eq ne)
2985 (simplify
2986 (cmp (exact_div @0 @1) INTEGER_CST@2)
2987 (if (!integer_zerop (@1))
2988 (if (wi::eq_p (@2, 0))
2989 (cmp @0 @2)
2990 (if (TREE_CODE (@1) == INTEGER_CST)
2991 (with
2992 {
2993 bool ovf;
2994 wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
2995 }
2996 (if (ovf)
2997 { constant_boolean_node (cmp == NE_EXPR, type); }
2998 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
2999(for cmp (lt le gt ge)
3000 (simplify
3001 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
3002 (if (wi::gt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1))))
3003 (with
3004 {
3005 bool ovf;
3006 wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3007 }
3008 (if (ovf)
3009 { constant_boolean_node (wi::lt_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
3010 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3011 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
3012
cfdc4f33
MG
3013/* Unordered tests if either argument is a NaN. */
3014(simplify
3015 (bit_ior (unordered @0 @0) (unordered @1 @1))
aea417d7 3016 (if (types_match (@0, @1))
cfdc4f33 3017 (unordered @0 @1)))
257b01ba
MG
3018(simplify
3019 (bit_and (ordered @0 @0) (ordered @1 @1))
3020 (if (types_match (@0, @1))
3021 (ordered @0 @1)))
cfdc4f33
MG
3022(simplify
3023 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3024 @2)
257b01ba
MG
3025(simplify
3026 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3027 @2)
e18c1d66 3028
90c6f26c
RB
3029/* Simple range test simplifications. */
3030/* A < B || A >= B -> true. */
5d30c58d
RB
3031(for test1 (lt le le le ne ge)
3032 test2 (ge gt ge ne eq ne)
90c6f26c
RB
3033 (simplify
3034 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3035 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3036 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3037 { constant_boolean_node (true, type); })))
3038/* A < B && A >= B -> false. */
3039(for test1 (lt lt lt le ne eq)
3040 test2 (ge gt eq gt eq gt)
3041 (simplify
3042 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3043 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3044 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3045 { constant_boolean_node (false, type); })))
3046
9ebc3467
YG
3047/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3048 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3049
3050 Note that comparisons
3051 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3052 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3053 will be canonicalized to above so there's no need to
3054 consider them here.
3055 */
3056
3057(for cmp (le gt)
3058 eqcmp (eq ne)
3059 (simplify
3060 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3061 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3062 (with
3063 {
3064 tree ty = TREE_TYPE (@0);
3065 unsigned prec = TYPE_PRECISION (ty);
3066 wide_int mask = wi::to_wide (@2, prec);
3067 wide_int rhs = wi::to_wide (@3, prec);
3068 signop sgn = TYPE_SIGN (ty);
3069 }
3070 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3071 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3072 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3073 { build_zero_cst (ty); }))))))
3074
534bd33b
MG
3075/* -A CMP -B -> B CMP A. */
3076(for cmp (tcc_comparison)
3077 scmp (swapped_tcc_comparison)
3078 (simplify
3079 (cmp (negate @0) (negate @1))
3080 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3081 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3082 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3083 (scmp @0 @1)))
3084 (simplify
3085 (cmp (negate @0) CONSTANT_CLASS_P@1)
3086 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3087 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3088 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
23f27839 3089 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
534bd33b
MG
3090 (if (tem && !TREE_OVERFLOW (tem))
3091 (scmp @0 { tem; }))))))
3092
b0eb889b
MG
3093/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3094(for op (eq ne)
3095 (simplify
3096 (op (abs @0) zerop@1)
3097 (op @0 @1)))
3098
79d4f7c6
RB
3099/* From fold_sign_changed_comparison and fold_widened_comparison. */
3100(for cmp (simple_comparison)
3101 (simplify
3102 (cmp (convert@0 @00) (convert?@1 @10))
452ec2a5 3103 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
79d4f7c6
RB
3104 /* Disable this optimization if we're casting a function pointer
3105 type on targets that require function pointer canonicalization. */
3106 && !(targetm.have_canonicalize_funcptr_for_compare ()
3107 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
2fde61e3
RB
3108 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
3109 && single_use (@0))
79d4f7c6
RB
3110 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3111 && (TREE_CODE (@10) == INTEGER_CST
3112 || (@1 != @10 && types_match (TREE_TYPE (@10), TREE_TYPE (@00))))
3113 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3114 || cmp == NE_EXPR
3115 || cmp == EQ_EXPR)
3116 && (POINTER_TYPE_P (TREE_TYPE (@00)) == POINTER_TYPE_P (TREE_TYPE (@0))))
3117 /* ??? The special-casing of INTEGER_CST conversion was in the original
3118 code and here to avoid a spurious overflow flag on the resulting
3119 constant which fold_convert produces. */
3120 (if (TREE_CODE (@1) == INTEGER_CST)
3121 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3122 TREE_OVERFLOW (@1)); })
3123 (cmp @00 (convert @1)))
3124
3125 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3126 /* If possible, express the comparison in the shorter mode. */
3127 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
7fd82d52
PP
3128 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3129 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3130 && TYPE_UNSIGNED (TREE_TYPE (@00))))
79d4f7c6
RB
3131 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3132 || ((TYPE_PRECISION (TREE_TYPE (@00))
3133 >= TYPE_PRECISION (TREE_TYPE (@10)))
3134 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3135 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3136 || (TREE_CODE (@10) == INTEGER_CST
f6c15759 3137 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
3138 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3139 (cmp @00 (convert @10))
3140 (if (TREE_CODE (@10) == INTEGER_CST
f6c15759 3141 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
3142 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3143 (with
3144 {
3145 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3146 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3147 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3148 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3149 }
3150 (if (above || below)
3151 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3152 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3153 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3154 { constant_boolean_node (above ? true : false, type); }
3155 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3156 { constant_boolean_node (above ? false : true, type); }))))))))))))
66e1cacf 3157
96a111a3
RB
3158(for cmp (eq ne)
3159 /* A local variable can never be pointed to by
3160 the default SSA name of an incoming parameter.
3161 SSA names are canonicalized to 2nd place. */
3162 (simplify
3163 (cmp addr@0 SSA_NAME@1)
3164 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3165 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3166 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3167 (if (TREE_CODE (base) == VAR_DECL
3168 && auto_var_in_fn_p (base, current_function_decl))
3169 (if (cmp == NE_EXPR)
3170 { constant_boolean_node (true, type); }
3171 { constant_boolean_node (false, type); }))))))
3172
66e1cacf
RB
3173/* Equality compare simplifications from fold_binary */
3174(for cmp (eq ne)
3175
3176 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3177 Similarly for NE_EXPR. */
3178 (simplify
3179 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3180 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
3181 && wi::bit_and_not (@1, @2) != 0)
3182 { constant_boolean_node (cmp == NE_EXPR, type); }))
3183
3184 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3185 (simplify
3186 (cmp (bit_xor @0 @1) integer_zerop)
3187 (cmp @0 @1))
3188
3189 /* (X ^ Y) == Y becomes X == 0.
3190 Likewise (X ^ Y) == X becomes Y == 0. */
3191 (simplify
99e943a2 3192 (cmp:c (bit_xor:c @0 @1) @0)
66e1cacf
RB
3193 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3194
3195 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3196 (simplify
3197 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3198 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
d057c866 3199 (cmp @0 (bit_xor @1 (convert @2)))))
d057c866
RB
3200
3201 (simplify
3202 (cmp (convert? addr@0) integer_zerop)
3203 (if (tree_single_nonzero_warnv_p (@0, NULL))
3204 { constant_boolean_node (cmp == NE_EXPR, type); })))
3205
b0eb889b
MG
3206/* If we have (A & C) == C where C is a power of 2, convert this into
3207 (A & C) != 0. Similarly for NE_EXPR. */
3208(for cmp (eq ne)
3209 icmp (ne eq)
3210 (simplify
3211 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3212 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
3213
519e0faa
PB
3214/* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3215 convert this into a shift followed by ANDing with D. */
3216(simplify
3217 (cond
3218 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
3219 integer_pow2p@2 integer_zerop)
3220 (with {
3221 int shift = wi::exact_log2 (@2) - wi::exact_log2 (@1);
3222 }
3223 (if (shift > 0)
3224 (bit_and
3225 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3226 (bit_and
3227 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); })) @2))))
3228
b0eb889b
MG
3229/* If we have (A & C) != 0 where C is the sign bit of A, convert
3230 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3231(for cmp (eq ne)
3232 ncmp (ge lt)
3233 (simplify
3234 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3235 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2be65d9e 3236 && type_has_mode_precision_p (TREE_TYPE (@0))
b0eb889b
MG
3237 && element_precision (@2) >= element_precision (@0)
3238 && wi::only_sign_bit_p (@1, element_precision (@0)))
3239 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3240 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3241
519e0faa 3242/* If we have A < 0 ? C : 0 where C is a power of 2, convert
c0140e3c 3243 this into a right shift or sign extension followed by ANDing with C. */
519e0faa
PB
3244(simplify
3245 (cond
3246 (lt @0 integer_zerop)
3247 integer_pow2p@1 integer_zerop)
c0140e3c
JJ
3248 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
3249 (with {
519e0faa 3250 int shift = element_precision (@0) - wi::exact_log2 (@1) - 1;
c0140e3c
JJ
3251 }
3252 (if (shift >= 0)
3253 (bit_and
3254 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3255 @1)
3256 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
3257 sign extension followed by AND with C will achieve the effect. */
3258 (bit_and (convert @0) @1)))))
519e0faa 3259
68aba1f6
RB
3260/* When the addresses are not directly of decls compare base and offset.
3261 This implements some remaining parts of fold_comparison address
3262 comparisons but still no complete part of it. Still it is good
3263 enough to make fold_stmt not regress when not dispatching to fold_binary. */
3264(for cmp (simple_comparison)
3265 (simplify
f501d5cd 3266 (cmp (convert1?@2 addr@0) (convert2? addr@1))
68aba1f6
RB
3267 (with
3268 {
3269 HOST_WIDE_INT off0, off1;
3270 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3271 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3272 if (base0 && TREE_CODE (base0) == MEM_REF)
3273 {
3274 off0 += mem_ref_offset (base0).to_short_addr ();
3275 base0 = TREE_OPERAND (base0, 0);
3276 }
3277 if (base1 && TREE_CODE (base1) == MEM_REF)
3278 {
3279 off1 += mem_ref_offset (base1).to_short_addr ();
3280 base1 = TREE_OPERAND (base1, 0);
3281 }
3282 }
da571fda
RB
3283 (if (base0 && base1)
3284 (with
3285 {
aad88aed 3286 int equal = 2;
70f40fea
JJ
3287 /* Punt in GENERIC on variables with value expressions;
3288 the value expressions might point to fields/elements
3289 of other vars etc. */
3290 if (GENERIC
3291 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3292 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3293 ;
3294 else if (decl_in_symtab_p (base0)
3295 && decl_in_symtab_p (base1))
da571fda
RB
3296 equal = symtab_node::get_create (base0)
3297 ->equal_address_to (symtab_node::get_create (base1));
c3bea076
RB
3298 else if ((DECL_P (base0)
3299 || TREE_CODE (base0) == SSA_NAME
3300 || TREE_CODE (base0) == STRING_CST)
3301 && (DECL_P (base1)
3302 || TREE_CODE (base1) == SSA_NAME
3303 || TREE_CODE (base1) == STRING_CST))
aad88aed 3304 equal = (base0 == base1);
da571fda 3305 }
5e19d437 3306 (if (equal == 1)
da571fda
RB
3307 (switch
3308 (if (cmp == EQ_EXPR)
3309 { constant_boolean_node (off0 == off1, type); })
3310 (if (cmp == NE_EXPR)
3311 { constant_boolean_node (off0 != off1, type); })
3312 (if (cmp == LT_EXPR)
3313 { constant_boolean_node (off0 < off1, type); })
3314 (if (cmp == LE_EXPR)
3315 { constant_boolean_node (off0 <= off1, type); })
3316 (if (cmp == GE_EXPR)
3317 { constant_boolean_node (off0 >= off1, type); })
3318 (if (cmp == GT_EXPR)
3319 { constant_boolean_node (off0 > off1, type); }))
3320 (if (equal == 0
3321 && DECL_P (base0) && DECL_P (base1)
3322 /* If we compare this as integers require equal offset. */
3323 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
3324 || off0 == off1))
3325 (switch
3326 (if (cmp == EQ_EXPR)
3327 { constant_boolean_node (false, type); })
3328 (if (cmp == NE_EXPR)
3329 { constant_boolean_node (true, type); })))))))))
66e1cacf 3330
98998245
RB
3331/* Simplify pointer equality compares using PTA. */
3332(for neeq (ne eq)
3333 (simplify
3334 (neeq @0 @1)
3335 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3336 && ptrs_compare_unequal (@0, @1))
3337 { neeq == EQ_EXPR ? boolean_false_node : boolean_true_node; })))
3338
8f63caf6 3339/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
467719fb
PK
3340 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3341 Disable the transform if either operand is pointer to function.
3342 This broke pr22051-2.c for arm where function pointer
3343 canonicalizaion is not wanted. */
1c0a8806 3344
8f63caf6
RB
3345(for cmp (ne eq)
3346 (simplify
3347 (cmp (convert @0) INTEGER_CST@1)
467719fb
PK
3348 (if ((POINTER_TYPE_P (TREE_TYPE (@0)) && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3349 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3350 || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && POINTER_TYPE_P (TREE_TYPE (@1))
3351 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
8f63caf6
RB
3352 (cmp @0 (convert @1)))))
3353
21aacde4
RB
3354/* Non-equality compare simplifications from fold_binary */
/* NOTE(review): the bare commit-hash / author-initial lines scattered
   through this chunk are git-blame annotations, not match.pd syntax --
   verify against a clean checkout of gcc/match.pd before building.  */
/* Fold X cmp C where C is the extreme (or one-off-extreme) value of its
   type: the result is either a known constant or collapses to eq/ne.
   convert?@2 optionally matches a no-op conversion (checked below via
   tree_nop_conversion_p); (switch ...) takes the first true branch.  */
3355(for cmp (lt gt le ge)
3356 /* Comparisons with the highest or lowest possible integer of
3357 the specified precision will have known values. */
3358 (simplify
3359 (cmp (convert?@2 @0) INTEGER_CST@1)
3360 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3361 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3362 (with
3363 {
3364 tree arg1_type = TREE_TYPE (@1);
3365 unsigned int prec = TYPE_PRECISION (arg1_type);
3366 wide_int max = wi::max_value (arg1_type);
3367 wide_int signed_max = wi::max_value (prec, SIGNED);
3368 wide_int min = wi::min_value (arg1_type);
3369 }
3370 (switch
/* X > MAX is false, X >= MAX is X == MAX, X <= MAX is true,
   X < MAX is X != MAX.  */
3371 (if (wi::eq_p (@1, max))
3372 (switch
3373 (if (cmp == GT_EXPR)
3374 { constant_boolean_node (false, type); })
3375 (if (cmp == GE_EXPR)
3376 (eq @2 @1))
3377 (if (cmp == LE_EXPR)
3378 { constant_boolean_node (true, type); })
3379 (if (cmp == LT_EXPR)
3380 (ne @2 @1))))
21aacde4
RB
/* Mirror cases for the minimum value.  */
3381 (if (wi::eq_p (@1, min))
3382 (switch
3383 (if (cmp == LT_EXPR)
3384 { constant_boolean_node (false, type); })
3385 (if (cmp == LE_EXPR)
3386 (eq @2 @1))
3387 (if (cmp == GE_EXPR)
3388 { constant_boolean_node (true, type); })
3389 (if (cmp == GT_EXPR)
3390 (ne @2 @1))))
9bc22d19
RB
/* X > MAX-1 is X == MAX, X <= MAX-1 is X != MAX.  */
3391 (if (wi::eq_p (@1, max - 1))
3392 (switch
3393 (if (cmp == GT_EXPR)
3394 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
3395 (if (cmp == LE_EXPR)
3396 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
21aacde4
RB
/* X >= MIN+1 is X != MIN, X < MIN+1 is X == MIN.  */
3397 (if (wi::eq_p (@1, min + 1))
3398 (switch
3399 (if (cmp == GE_EXPR)
3400 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
3401 (if (cmp == LT_EXPR)
3402 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
/* Unsigned X <= signed_max / X > signed_max is a sign-bit test:
   rewrite as a signed comparison against zero.  */
3403 (if (wi::eq_p (@1, signed_max)
3404 && TYPE_UNSIGNED (arg1_type)
3405 /* We will flip the signedness of the comparison operator
3406 associated with the mode of @1, so the sign bit is
3407 specified by this mode. Check that @1 is the signed
3408 max associated with this sign bit. */
7a504f33 3409 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
21aacde4
RB
3410 /* signed_type does not work on pointer types. */
3411 && INTEGRAL_TYPE_P (arg1_type))
3412 /* The following case also applies to X < signed_max+1
3413 and X >= signed_max+1 because previous transformations. */
3414 (if (cmp == LE_EXPR || cmp == GT_EXPR)
3415 (with { tree st = signed_type_for (arg1_type); }
3416 (if (cmp == LE_EXPR)
3417 (ge (convert:st @0) { build_zero_cst (st); })
3418 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
3419
b5d3d787
RB
/* For a NaN second operand, ORDERED and LTGT yield false and the
   unordered predicates yield true, as encoded in the result below.
   LTGT is skipped under -ftrapping-math (see the flag check).  */
3420(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
3421 /* If the second operand is NaN, the result is constant. */
3422 (simplify
3423 (cmp @0 REAL_CST@1)
3424 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3425 && (cmp != LTGT_EXPR || ! flag_trapping_math))
50301115 3426 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
b5d3d787 3427 ? false : true, type); })))
21aacde4 3428
55cf3946
RB
/* Both folds require an exact BOOLEAN_TYPE operand whose type matches
   the result type, so no conversion needs to be inserted.  */
3429/* bool_var != 0 becomes bool_var. */
3430(simplify
b5d3d787 3431 (ne @0 integer_zerop)
55cf3946
RB
3432 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3433 && types_match (type, TREE_TYPE (@0)))
3434 (non_lvalue @0)))
3435/* bool_var == 1 becomes bool_var. */
3436(simplify
b5d3d787 3437 (eq @0 integer_onep)
55cf3946
RB
3438 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3439 && types_match (type, TREE_TYPE (@0)))
3440 (non_lvalue @0)))
b5d3d787
RB
3441/* Do not handle
3442 bool_var == 0 becomes !bool_var or
3443 bool_var != 1 becomes !bool_var
3444 here because that only is good in assignment context as long
3445 as we require a tcc_comparison in GIMPLE_CONDs where we'd
3446 replace if (x == 0) with tem = ~x; if (tem != 0) which is
3447 clearly less optimal and which we'll transform again in forwprop. */
55cf3946 3448
ca1206be
MG
3449/* When one argument is a constant, overflow detection can be simplified.
3450 Currently restricted to single use so as not to interfere too much with
3451 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
3452 A + CST CMP A -> A CMP' CST' */
/* Valid only for unsigned wrapping types; the replacement constant is
   UINT_MAX(prec) - CST, per the wi::max_value expression below.  */
3453(for cmp (lt le ge gt)
3454 out (gt gt le le)
3455 (simplify
a8e9f9a3 3456 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
ca1206be
MG
3457 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3458 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
3459 && wi::ne_p (@1, 0)
3460 && single_use (@2))
3461 (out @0 { wide_int_to_tree (TREE_TYPE (@0), wi::max_value
3462 (TYPE_PRECISION (TREE_TYPE (@0)), UNSIGNED) - @1); }))))
3463
3563f78f
MG
3464/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
3465 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
3466 expects the long form, so we restrict the transformation for now. */
3467(for cmp (gt le)
3468 (simplify
a8e9f9a3 3469 (cmp:c (minus@2 @0 @1) @0)
3563f78f
MG
3470 (if (single_use (@2)
3471 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3472 && TYPE_UNSIGNED (TREE_TYPE (@0))
3473 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3474 (cmp @1 @0))))
3476/* Testing for overflow is unnecessary if we already know the result. */
3563f78f
MG
3477/* A - B > A */
3478(for cmp (gt le)
3479 out (ne eq)
3480 (simplify
a8e9f9a3 3481 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3563f78f
MG
3482 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3483 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3484 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3485/* A + B < A */
3486(for cmp (lt ge)
3487 out (ne eq)
3488 (simplify
a8e9f9a3 3489 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3563f78f
MG
3490 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3491 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3492 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3493
603aeb87 3494/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
0557293f 3495 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
0557293f
AM
3496(for cmp (lt ge)
3497 out (ne eq)
3498 (simplify
603aeb87 3499 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
0557293f
AM
3500 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
3501 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
3502 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
55cf3946 3503
53f3cd25
RS
3504/* Simplification of math builtins. These rules must all be optimizations
3505 as well as IL simplifications. If there is a possibility that the new
3506 form could be a pessimization, the rule should go in the canonicalization
3507 section that follows this one.
e18c1d66 3508
53f3cd25
RS
3509 Rules can generally go in this section if they satisfy one of
3510 the following:
3511
3512 - the rule describes an identity
3513
3514 - the rule replaces calls with something as simple as addition or
3515 multiplication
3516
3517 - the rule contains unary calls only and simplifies the surrounding
3518 arithmetic. (The idea here is to exclude non-unary calls in which
3519 one operand is constant and in which the call is known to be cheap
3520 when the operand has that value.) */
52c6378a 3521
/* Everything inside this (if ...) applies only under
   -funsafe-math-optimizations.  The ':s' modifier on operands marks
   single-use-only matches (genmatch convention -- see the GCC internals
   manual, Match and Simplify).  */
53f3cd25 3522(if (flag_unsafe_math_optimizations)
52c6378a
N
3523 /* Simplify sqrt(x) * sqrt(x) -> x. */
3524 (simplify
3525 (mult (SQRT@1 @0) @1)
3526 (if (!HONOR_SNANS (type))
3527 @0))
3528
ed17cb57
JW
3529 (for op (plus minus)
3530 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
3531 (simplify
3532 (op (rdiv @0 @1)
3533 (rdiv @2 @1))
3534 (rdiv (op @0 @2) @1)))
3535
35401640
N
3536 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
3537 (for root (SQRT CBRT)
3538 (simplify
3539 (mult (root:s @0) (root:s @1))
3540 (root (mult @0 @1))))
3541
35401640
N
3542 /* Simplify expN(x) * expN(y) -> expN(x+y). */
3543 (for exps (EXP EXP2 EXP10 POW10)
3544 (simplify
3545 (mult (exps:s @0) (exps:s @1))
3546 (exps (plus @0 @1))))
3547
52c6378a 3548 /* Simplify a/root(b/c) into a*root(c/b). */
35401640
N
3549 (for root (SQRT CBRT)
3550 (simplify
3551 (rdiv @0 (root:s (rdiv:s @1 @2)))
3552 (mult @0 (root (rdiv @2 @1)))))
3553
3554 /* Simplify x/expN(y) into x*expN(-y). */
3555 (for exps (EXP EXP2 EXP10 POW10)
3556 (simplify
3557 (rdiv @0 (exps:s @1))
3558 (mult @0 (exps (negate @1)))))
52c6378a 3559
eee7b6c4
RB
3560 (for logs (LOG LOG2 LOG10 LOG10)
3561 exps (EXP EXP2 EXP10 POW10)
8acda9b2 3562 /* logN(expN(x)) -> x. */
e18c1d66
RB
3563 (simplify
3564 (logs (exps @0))
8acda9b2
RS
3565 @0)
3566 /* expN(logN(x)) -> x. */
3567 (simplify
3568 (exps (logs @0))
3569 @0))
53f3cd25 3570
e18c1d66
RB
3571 /* Optimize logN(func()) for various exponential functions. We
3572 want to determine the value "x" and the power "exponent" in
3573 order to transform logN(x**exponent) into exponent*logN(x). */
/* The logs/exps lists are iterated pairwise, covering every mismatched
   log/exp base combination; the C switch below picks the base constant.  */
eee7b6c4
RB
3574 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
3575 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
e18c1d66
RB
3576 (simplify
3577 (logs (exps @0))
c9e926ce
RS
3578 (if (SCALAR_FLOAT_TYPE_P (type))
3579 (with {
3580 tree x;
3581 switch (exps)
3582 {
3583 CASE_CFN_EXP:
3584 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
3585 x = build_real_truncate (type, dconst_e ());
3586 break;
3587 CASE_CFN_EXP2:
3588 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
3589 x = build_real (type, dconst2);
3590 break;
3591 CASE_CFN_EXP10:
3592 CASE_CFN_POW10:
3593 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
3594 {
3595 REAL_VALUE_TYPE dconst10;
3596 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
3597 x = build_real (type, dconst10);
3598 }
3599 break;
3600 default:
3601 gcc_unreachable ();
3602 }
3603 }
3604 (mult (logs { x; }) @0)))))
53f3cd25 3605
e18c1d66
RB
3606 (for logs (LOG LOG
3607 LOG2 LOG2
3608 LOG10 LOG10)
3609 exps (SQRT CBRT)
3610 (simplify
3611 (logs (exps @0))
c9e926ce
RS
3612 (if (SCALAR_FLOAT_TYPE_P (type))
3613 (with {
3614 tree x;
3615 switch (exps)
3616 {
3617 CASE_CFN_SQRT:
3618 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
3619 x = build_real (type, dconsthalf);
3620 break;
3621 CASE_CFN_CBRT:
3622 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
3623 x = build_real_truncate (type, dconst_third ());
3624 break;
3625 default:
3626 gcc_unreachable ();
3627 }
3628 }
3629 (mult { x; } (logs @0))))))
53f3cd25
RS
3630
3631 /* logN(pow(x,exponent)) -> exponent*logN(x). */
e18c1d66
RB
3632 (for logs (LOG LOG2 LOG10)
3633 pows (POW)
3634 (simplify
3635 (logs (pows @0 @1))
53f3cd25
RS
3636 (mult @1 (logs @0))))
3637
e83fe013
WD
3638 /* pow(C,x) -> exp(log(C)*x) if C > 0. */
3639 (for pows (POW)
3640 exps (EXP)
3641 logs (LOG)
3642 (simplify
3643 (pows REAL_CST@0 @1)
3644 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
3645 && real_isfinite (TREE_REAL_CST_PTR (@0)))
3646 (exps (mult (logs @0) @1)))))
3647
53f3cd25
RS
3648 (for sqrts (SQRT)
3649 cbrts (CBRT)
b4838d77 3650 pows (POW)
53f3cd25
RS
3651 exps (EXP EXP2 EXP10 POW10)
3652 /* sqrt(expN(x)) -> expN(x*0.5). */
3653 (simplify
3654 (sqrts (exps @0))
3655 (exps (mult @0 { build_real (type, dconsthalf); })))
3656 /* cbrt(expN(x)) -> expN(x/3). */
3657 (simplify
3658 (cbrts (exps @0))
b4838d77
RS
3659 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
3660 /* pow(expN(x), y) -> expN(x*y). */
3661 (simplify
3662 (pows (exps @0) @1)
3663 (exps (mult @0 @1))))
cfed37a0
RS
3664
3665 /* tan(atan(x)) -> x. */
3666 (for tans (TAN)
3667 atans (ATAN)
3668 (simplify
3669 (tans (atans @0))
3670 @0)))
53f3cd25 3671
abcc43f5
RS
/* Unconditional math identities: these hold without
   -funsafe-math-optimizations.  */
3672/* cabs(x+0i) or cabs(0+xi) -> abs(x). */
3673(simplify
e04d2a35 3674 (CABS (complex:C @0 real_zerop@1))
abcc43f5
RS
3675 (abs @0))
3676
67dbe582
RS
3677/* trunc(trunc(x)) -> trunc(x), etc. */
3678(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
3679 (simplify
3680 (fns (fns @0))
3681 (fns @0)))
3682/* f(x) -> x if x is integer valued and f does nothing for such values. */
afeb246c 3683(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
67dbe582
RS
3684 (simplify
3685 (fns integer_valued_real_p@0)
3686 @0))
67dbe582 3687
4d7836c4
RS
3688/* hypot(x,0) and hypot(0,x) -> abs(x). */
3689(simplify
c9e926ce 3690 (HYPOT:c @0 real_zerop@1)
4d7836c4
RS
3691 (abs @0))
3692
b4838d77
RS
3693/* pow(1,x) -> 1. */
3694(simplify
3695 (POW real_onep@0 @1)
3696 @0)
3697
461e4145
RS
3698(simplify
3699 /* copysign(x,x) -> x. */
3700 (COPYSIGN @0 @0)
3701 @0)
3702
3703(simplify
3704 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
3705 (COPYSIGN @0 tree_expr_nonnegative_p@1)
3706 (abs @0))
3707
86c0733f
RS
3708(for scale (LDEXP SCALBN SCALBLN)
3709 /* ldexp(0, x) -> 0. */
3710 (simplify
3711 (scale real_zerop@0 @1)
3712 @0)
3713 /* ldexp(x, 0) -> x. */
3714 (simplify
3715 (scale @0 integer_zerop@1)
3716 @0)
3717 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
3718 (simplify
3719 (scale REAL_CST@0 @1)
3720 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
3721 @0)))
3722
53f3cd25
RS
3723/* Canonicalization of sequences of math builtins. These rules represent
3724 IL simplifications but are not necessarily optimizations.
3725
3726 The sincos pass is responsible for picking "optimal" implementations
3727 of math builtins, which may be more complicated and can sometimes go
3728 the other way, e.g. converting pow into a sequence of sqrts.
3729 We only want to do these canonicalizations before the pass has run. */
3730
3731(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
3732 /* Simplify tan(x) * cos(x) -> sin(x). */
3733 (simplify
3734 (mult:c (TAN:s @0) (COS:s @0))
3735 (SIN @0))
3736
3737 /* Simplify x * pow(x,c) -> pow(x,c+1). */
3738 (simplify
de3fbea3 3739 (mult:c @0 (POW:s @0 REAL_CST@1))
53f3cd25
RS
3740 (if (!TREE_OVERFLOW (@1))
3741 (POW @0 (plus @1 { build_one_cst (type); }))))
3742
3743 /* Simplify sin(x) / cos(x) -> tan(x). */
3744 (simplify
3745 (rdiv (SIN:s @0) (COS:s @0))
3746 (TAN @0))
3747
3748 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
3749 (simplify
3750 (rdiv (COS:s @0) (SIN:s @0))
3751 (rdiv { build_one_cst (type); } (TAN @0)))
3752
3753 /* Simplify sin(x) / tan(x) -> cos(x). */
3754 (simplify
3755 (rdiv (SIN:s @0) (TAN:s @0))
3756 (if (! HONOR_NANS (@0)
3757 && ! HONOR_INFINITIES (@0))
c9e926ce 3758 (COS @0)))
53f3cd25
RS
3759
3760 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
3761 (simplify
3762 (rdiv (TAN:s @0) (SIN:s @0))
3763 (if (! HONOR_NANS (@0)
3764 && ! HONOR_INFINITIES (@0))
3765 (rdiv { build_one_cst (type); } (COS @0))))
3766
3767 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
3768 (simplify
3769 (mult (POW:s @0 @1) (POW:s @0 @2))
3770 (POW @0 (plus @1 @2)))
3771
3772 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
3773 (simplify
3774 (mult (POW:s @0 @1) (POW:s @2 @1))
3775 (POW (mult @0 @2) @1))
3776
de3fbea3
RB
3777 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
3778 (simplify
3779 (mult (POWI:s @0 @1) (POWI:s @2 @1))
3780 (POWI (mult @0 @2) @1))
3781
53f3cd25
RS
3782 /* Simplify pow(x,c) / x -> pow(x,c-1). */
3783 (simplify
3784 (rdiv (POW:s @0 REAL_CST@1) @0)
3785 (if (!TREE_OVERFLOW (@1))
3786 (POW @0 (minus @1 { build_one_cst (type); }))))
3787
3788 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
3789 (simplify
3790 (rdiv @0 (POW:s @1 @2))
3791 (mult @0 (POW @1 (negate @2))))
3792
/* Root/pow compositions collapse to a single pow with the product of
   the exponents; cbrt cases require a nonnegative base.  */
3793 (for sqrts (SQRT)
3794 cbrts (CBRT)
3795 pows (POW)
3796 /* sqrt(sqrt(x)) -> pow(x,1/4). */
3797 (simplify
3798 (sqrts (sqrts @0))
3799 (pows @0 { build_real (type, dconst_quarter ()); }))
3800 /* sqrt(cbrt(x)) -> pow(x,1/6). */
3801 (simplify
3802 (sqrts (cbrts @0))
3803 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
3804 /* cbrt(sqrt(x)) -> pow(x,1/6). */
3805 (simplify
3806 (cbrts (sqrts @0))
3807 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
3808 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
3809 (simplify
3810 (cbrts (cbrts tree_expr_nonnegative_p@0))
3811 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
3812 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
3813 (simplify
3814 (sqrts (pows @0 @1))
3815 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
3816 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
3817 (simplify
3818 (cbrts (pows tree_expr_nonnegative_p@0 @1))
b4838d77
RS
3819 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
3820 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
3821 (simplify
3822 (pows (sqrts @0) @1)
3823 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
3824 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
3825 (simplify
3826 (pows (cbrts tree_expr_nonnegative_p@0) @1)
3827 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
3828 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
3829 (simplify
3830 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
3831 (pows @0 (mult @1 @2))))
abcc43f5
RS
3832
3833 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
3834 (simplify
3835 (CABS (complex @0 @0))
96285749
RS
3836 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
3837
4d7836c4
RS
3838 /* hypot(x,x) -> fabs(x)*sqrt(2). */
3839 (simplify
3840 (HYPOT @0 @0)
3841 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
3842
96285749
RS
3843 /* cexp(x+yi) -> exp(x)*cexpi(y). */
3844 (for cexps (CEXP)
3845 exps (EXP)
3846 cexpis (CEXPI)
3847 (simplify
3848 (cexps compositional_complex@0)
3849 (if (targetm.libc_has_function (function_c99_math_complex))
3850 (complex
3851 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
3852 (mult @1 (imagpart @2)))))))
e18c1d66 3853
67dbe582
RS
3854(if (canonicalize_math_p ())
3855 /* floor(x) -> trunc(x) if x is nonnegative. */
3856 (for floors (FLOOR)
3857 truncs (TRUNC)
3858 (simplify
3859 (floors tree_expr_nonnegative_p@0)
3860 (truncs @0))))
3861
/* (match ...) defines a named predicate usable in later patterns:
   double_value_p / float_value_p test the main variant of the
   operand's type.  */
3862(match double_value_p
3863 @0
3864 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
3865(for froms (BUILT_IN_TRUNCL
3866 BUILT_IN_FLOORL
3867 BUILT_IN_CEILL
3868 BUILT_IN_ROUNDL
3869 BUILT_IN_NEARBYINTL
3870 BUILT_IN_RINTL)
3871 tos (BUILT_IN_TRUNC
3872 BUILT_IN_FLOOR
3873 BUILT_IN_CEIL
3874 BUILT_IN_ROUND
3875 BUILT_IN_NEARBYINT
3876 BUILT_IN_RINT)
3877 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
3878 (if (optimize && canonicalize_math_p ())
3879 (simplify
3880 (froms (convert double_value_p@0))
3881 (convert (tos @0)))))
3882
3883(match float_value_p
3884 @0
3885 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
3886(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
3887 BUILT_IN_FLOORL BUILT_IN_FLOOR
3888 BUILT_IN_CEILL BUILT_IN_CEIL
3889 BUILT_IN_ROUNDL BUILT_IN_ROUND
3890 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
3891 BUILT_IN_RINTL BUILT_IN_RINT)
3892 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
3893 BUILT_IN_FLOORF BUILT_IN_FLOORF
3894 BUILT_IN_CEILF BUILT_IN_CEILF
3895 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
3896 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
3897 BUILT_IN_RINTF BUILT_IN_RINTF)
3898 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
3899 if x is a float. */
5dac7dbd
JDA
3900 (if (optimize && canonicalize_math_p ()
3901 && targetm.libc_has_function (function_c99_misc))
67dbe582
RS
3902 (simplify
3903 (froms (convert float_value_p@0))
3904 (convert (tos @0)))))
3905
543a9bcd
RS
3906(for froms (XFLOORL XCEILL XROUNDL XRINTL)
3907 tos (XFLOOR XCEIL XROUND XRINT)
3908 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
3909 (if (optimize && canonicalize_math_p ())
3910 (simplify
3911 (froms (convert double_value_p@0))
3912 (tos @0))))
3913
3914(for froms (XFLOORL XCEILL XROUNDL XRINTL
3915 XFLOOR XCEIL XROUND XRINT)
3916 tos (XFLOORF XCEILF XROUNDF XRINTF)
3917 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
3918 if x is a float. */
3919 (if (optimize && canonicalize_math_p ())
3920 (simplify
3921 (froms (convert float_value_p@0))
3922 (tos @0))))
3923
3924(if (canonicalize_math_p ())
3925 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
3926 (for floors (IFLOOR LFLOOR LLFLOOR)
3927 (simplify
3928 (floors tree_expr_nonnegative_p@0)
3929 (fix_trunc @0))))
3930
3931(if (canonicalize_math_p ())
3932 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
3933 (for fns (IFLOOR LFLOOR LLFLOOR
3934 ICEIL LCEIL LLCEIL
3935 IROUND LROUND LLROUND)
3936 (simplify
3937 (fns integer_valued_real_p@0)
3938 (fix_trunc @0)))
3939 (if (!flag_errno_math)
3940 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
3941 (for rints (IRINT LRINT LLRINT)
3942 (simplify
3943 (rints integer_valued_real_p@0)
3944 (fix_trunc @0)))))
3945
3946(if (canonicalize_math_p ())
3947 (for ifn (IFLOOR ICEIL IROUND IRINT)
3948 lfn (LFLOOR LCEIL LROUND LRINT)
3949 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
3950 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
3951 sizeof (int) == sizeof (long). */
3952 (if (TYPE_PRECISION (integer_type_node)
3953 == TYPE_PRECISION (long_integer_type_node))
3954 (simplify
3955 (ifn @0)
3956 (lfn:long_integer_type_node @0)))
3957 /* Canonicalize llround (x) to lround (x) on LP64 targets where
3958 sizeof (long long) == sizeof (long). */
3959 (if (TYPE_PRECISION (long_long_integer_type_node)
3960 == TYPE_PRECISION (long_integer_type_node))
3961 (simplify
3962 (llfn @0)
3963 (lfn:long_integer_type_node @0)))))
3964
92c52eab
RS
3965/* cproj(x) -> x if we're ignoring infinities. */
3966(simplify
3967 (CPROJ @0)
3968 (if (!HONOR_INFINITIES (type))
3969 @0))
3970
4534c203
RB
3971/* If the real part is inf and the imag part is known to be
3972 nonnegative, return (inf + 0i). */
3973(simplify
3974 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
3975 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
92c52eab
RS
3976 { build_complex_inf (type, false); }))
3977
4534c203
RB
3978/* If the imag part is inf, return (inf+I*copysign(0,imag)). */
/* The sign of the imaginary zero is taken from the constant imag part.  */
3979(simplify
3980 (CPROJ (complex @0 REAL_CST@1))
3981 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
92c52eab 3982 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4534c203 3983
b4838d77
RS
/* pow(x,C) for small special constants C; the sqrt/cbrt rewrites are
   guarded by -funsafe-math-optimizations and the canonicalization
   phase check.  */
3984(for pows (POW)
3985 sqrts (SQRT)
3986 cbrts (CBRT)
3987 (simplify
3988 (pows @0 REAL_CST@1)
3989 (with {
3990 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
3991 REAL_VALUE_TYPE tmp;
3992 }
3993 (switch
3994 /* pow(x,0) -> 1. */
3995 (if (real_equal (value, &dconst0))
3996 { build_real (type, dconst1); })
3997 /* pow(x,1) -> x. */
3998 (if (real_equal (value, &dconst1))
3999 @0)
4000 /* pow(x,-1) -> 1/x. */
4001 (if (real_equal (value, &dconstm1))
4002 (rdiv { build_real (type, dconst1); } @0))
4003 /* pow(x,0.5) -> sqrt(x). */
4004 (if (flag_unsafe_math_optimizations
4005 && canonicalize_math_p ()
4006 && real_equal (value, &dconsthalf))
4007 (sqrts @0))
4008 /* pow(x,1/3) -> cbrt(x). */
4009 (if (flag_unsafe_math_optimizations
4010 && canonicalize_math_p ()
4011 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4012 real_equal (value, &tmp)))
4013 (cbrts @0))))))
4534c203 4014
5ddc84ca
RS
4015/* powi(1,x) -> 1. */
4016(simplify
4017 (POWI real_onep@0 @1)
4018 @0)
4019
/* powi takes an integer exponent; fold the 0/1/-1 cases.  */
4020(simplify
4021 (POWI @0 INTEGER_CST@1)
4022 (switch
4023 /* powi(x,0) -> 1. */
4024 (if (wi::eq_p (@1, 0))
4025 { build_real (type, dconst1); })
4026 /* powi(x,1) -> x. */
4027 (if (wi::eq_p (@1, 1))
4028 @0)
4029 /* powi(x,-1) -> 1/x. */
4030 (if (wi::eq_p (@1, -1))
4031 (rdiv { build_real (type, dconst1); } @0))))
4032
be144838
JL
4033/* Narrowing of arithmetic and logical operations.

4034
4035 These are conceptually similar to the transformations performed for
4036 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4037 term we want to move all that code out of the front-ends into here. */
4038
4039/* If we have a narrowing conversion of an arithmetic operation where
4040 both operands are widening conversions from the same type as the outer
4041 narrowing conversion. Then convert the innermost operands to a suitable
9c582551 4042 unsigned type (to avoid introducing undefined behavior), perform the
be144838
JL
4043 operation and convert the result to the desired type. */
4044(for op (plus minus)
4045 (simplify
93f90bec 4046 (convert (op:s (convert@2 @0) (convert?@3 @1)))
be144838
JL
4047 (if (INTEGRAL_TYPE_P (type)
4048 /* We check for type compatibility between @0 and @1 below,
4049 so there's no need to check that @1/@3 are integral types. */
4050 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4051 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4052 /* The precision of the type of each operand must match the
4053 precision of the mode of each operand, similarly for the
4054 result. */
2be65d9e
RS
4055 && type_has_mode_precision_p (TREE_TYPE (@0))
4056 && type_has_mode_precision_p (TREE_TYPE (@1))
4057 && type_has_mode_precision_p (type)
be144838
JL
4058 /* The inner conversion must be a widening conversion. */
4059 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
93f90bec
BC
4060 && types_match (@0, type)
4061 && (types_match (@0, @1)
4062 /* Or the second operand is const integer or converted const
4063 integer from valueize. */
4064 || TREE_CODE (@1) == INTEGER_CST))
/* Wrapping types can do the narrow op directly; otherwise detour
   through the unsigned counterpart so no signed overflow is created.  */
be144838 4065 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
93f90bec 4066 (op @0 (convert @1))
8fdc6c67 4067 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
93f90bec
BC
4068 (convert (op (convert:utype @0)
4069 (convert:utype @1))))))))
48451e8f
JL
4070
4071/* This is another case of narrowing, specifically when there's an outer
4072 BIT_AND_EXPR which masks off bits outside the type of the innermost
4073 operands. Like the previous case we have to convert the operands
9c582551 4074 to unsigned types to avoid introducing undefined behavior for the
48451e8f
JL
4075 arithmetic operation. */
4076(for op (minus plus)
8fdc6c67
RB
4077 (simplify
4078 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4079 (if (INTEGRAL_TYPE_P (type)
4080 /* We check for type compatibility between @0 and @1 below,
4081 so there's no need to check that @1/@3 are integral types. */
4082 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4083 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4084 /* The precision of the type of each operand must match the
4085 precision of the mode of each operand, similarly for the
4086 result. */
2be65d9e
RS
4087 && type_has_mode_precision_p (TREE_TYPE (@0))
4088 && type_has_mode_precision_p (TREE_TYPE (@1))
4089 && type_has_mode_precision_p (type)
8fdc6c67
RB
4090 /* The inner conversion must be a widening conversion. */
4091 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4092 && types_match (@0, @1)
/* The mask must fit the narrow type and not set any bits above it.  */
4093 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4094 <= TYPE_PRECISION (TREE_TYPE (@0)))
0a8c1e23
JL
4095 && (wi::bit_and (@4, wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4096 true, TYPE_PRECISION (type))) == 0))
8fdc6c67
RB
4097 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4098 (with { tree ntype = TREE_TYPE (@0); }
4099 (convert (bit_and (op @0 @1) (convert:ntype @4))))
4100 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4101 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
4102 (convert:utype @4))))))))
4f7a5692
MC
4103
4104/* Transform (@0 < @1 and @0 < @2) to use min,
4105 (@0 > @1 and @0 > @2) to use max */
/* Restricted to integral @0 and a non-constant @0 so the min/max form
   is a genuine simplification.  */
4106(for op (lt le gt ge)
4107 ext (min min max max)
4108 (simplify
4618c453
RB
4109 (bit_and (op:cs @0 @1) (op:cs @0 @2))
4110 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4111 && TREE_CODE (@0) != INTEGER_CST)
4f7a5692
MC
4112 (op @0 (ext @1 @2)))))
4113
7317ef4a
RS
4114(simplify
4115 /* signbit(x) -> 0 if x is nonnegative. */
4116 (SIGNBIT tree_expr_nonnegative_p@0)
4117 { integer_zero_node; })
4118
4119(simplify
4120 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
4121 (SIGNBIT @0)
4122 (if (!HONOR_SIGNED_ZEROS (@0))
4123 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
a8b85ce9
MG
4124
4125/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
/* Equality variant: if computing C2 -+ C1 overflows in an
   undefined-overflow type, the comparison result is known outright.  */
4126(for cmp (eq ne)
4127 (for op (plus minus)
4128 rop (minus plus)
4129 (simplify
4130 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4131 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4132 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
4133 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
4134 && !TYPE_SATURATING (TREE_TYPE (@0)))
4135 (with { tree res = int_const_binop (rop, @2, @1); }
75473a91
RB
4136 (if (TREE_OVERFLOW (res)
4137 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
a8b85ce9
MG
4138 { constant_boolean_node (cmp == NE_EXPR, type); }
4139 (if (single_use (@3))
4140 (cmp @0 { res; }))))))))
/* Relational variant: relies on undefined signed overflow and emits
   -Wstrict-overflow diagnostics via fold_overflow_warning.  */
4141(for cmp (lt le gt ge)
4142 (for op (plus minus)
4143 rop (minus plus)
4144 (simplify
4145 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4146 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4147 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4148 (with { tree res = int_const_binop (rop, @2, @1); }
4149 (if (TREE_OVERFLOW (res))
4150 {
4151 fold_overflow_warning (("assuming signed overflow does not occur "
4152 "when simplifying conditional to constant"),
4153 WARN_STRICT_OVERFLOW_CONDITIONAL);
4154 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
4155 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
4156 bool ovf_high = wi::lt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
4157 != (op == MINUS_EXPR);
4158 constant_boolean_node (less == ovf_high, type);
4159 }
4160 (if (single_use (@3))
4161 (with
4162 {
4163 fold_overflow_warning (("assuming signed overflow does not occur "
4164 "when changing X +- C1 cmp C2 to "
4165 "X cmp C2 -+ C1"),
4166 WARN_STRICT_OVERFLOW_COMPARISON);
4167 }
4168 (cmp @0 { res; })))))))))
d3e40b76
RB
4169
4170/* Canonicalizations of BIT_FIELD_REFs. */
4171
/* BIT_FIELD_REF @0 @1 @2: extract @1 bits at bit offset @2 from @0.
   Complex halves become realpart/imagpart; full-width or low-part
   extracts of integers become conversions.  */
4172(simplify
4173 (BIT_FIELD_REF @0 @1 @2)
4174 (switch
4175 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
4176 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4177 (switch
4178 (if (integer_zerop (@2))
4179 (view_convert (realpart @0)))
4180 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4181 (view_convert (imagpart @0)))))
4182 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4183 && INTEGRAL_TYPE_P (type)
171f6f05
RB
4184 /* On GIMPLE this should only apply to register arguments. */
4185 && (! GIMPLE || is_gimple_reg (@0))
d3e40b76
RB
4186 /* A bit-field-ref that referenced the full argument can be stripped. */
4187 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
4188 && integer_zerop (@2))
4189 /* Low-parts can be reduced to integral conversions.
4190 ??? The following doesn't work for PDP endian. */
4191 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
4192 /* Don't even think about BITS_BIG_ENDIAN. */
4193 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
4194 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
4195 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
4196 ? (TYPE_PRECISION (TREE_TYPE (@0))
4197 - TYPE_PRECISION (type))
4198 : 0)) == 0)))
4199 (convert @0))))
4200
4201/* Simplify vector extracts. */
4202
4203(simplify
4204 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
4205 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
4206 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
4207 || (VECTOR_TYPE_P (type)
4208 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
4209 (with
4210 {
4211 tree ctor = (TREE_CODE (@0) == SSA_NAME
4212 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
4213 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
4214 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
4215 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
4216 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
4217 }
4218 (if (n != 0
4219 && (idx % width) == 0
4220 && (n % width) == 0
4221 && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
4222 (with
4223 {
4224 idx = idx / width;
4225 n = n / width;
4226 /* Constructor elements can be subvectors. */
4227 unsigned HOST_WIDE_INT k = 1;
4228 if (CONSTRUCTOR_NELTS (ctor) != 0)
4229 {
4230 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
4231 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
4232 k = TYPE_VECTOR_SUBPARTS (cons_elem);
4233 }
4234 }
4235 (switch
4236 /* We keep an exact subset of the constructor elements. */
4237 (if ((idx % k) == 0 && (n % k) == 0)
4238 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4239 { build_constructor (type, NULL); }
4240 (with
4241 {
4242 idx /= k;
4243 n /= k;
4244 }
4245 (if (n == 1)
4246 (if (idx < CONSTRUCTOR_NELTS (ctor))
4247 { CONSTRUCTOR_ELT (ctor, idx)->value; }
4248 { build_zero_cst (type); })
4249 {
4250 vec<constructor_elt, va_gc> *vals;
4251 vec_alloc (vals, n);
4252 for (unsigned i = 0;
4253 i < n && idx + i < CONSTRUCTOR_NELTS (ctor); ++i)
4254 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
4255 CONSTRUCTOR_ELT (ctor, idx + i)->value);
4256 build_constructor (type, vals);
4257 }))))
4258 /* The bitfield references a single constructor element. */
4259 (if (idx + n <= (idx / k + 1) * k)
4260 (switch
4261 (if (CONSTRUCTOR_NELTS (ctor) <= idx / k)
4262 { build_zero_cst (type); })
4263 (if (n == k)
4264 { CONSTRUCTOR_ELT (ctor, idx / k)->value; })
4265 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / k)->value; }
4266 @1 { bitsize_int ((idx % k) * width); })))))))))
92e29a5e
RB
4267
4268/* Simplify a bit extraction from a bit insertion for the cases with
4269 the inserted element fully covering the extraction or the insertion
4270 not touching the extraction. */
/* First branch: extraction lies entirely within the inserted value @1,
   so re-extract from @1 at the adjusted offset.  Second branch: the two
   ranges are disjoint, so extract from the original @0 unchanged.  */
4271(simplify
4272 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
4273 (with
4274 {
4275 unsigned HOST_WIDE_INT isize;
4276 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4277 isize = TYPE_PRECISION (TREE_TYPE (@1));
4278 else
4279 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
4280 }
4281 (switch
4282 (if (wi::leu_p (@ipos, @rpos)
4283 && wi::leu_p (wi::add (@rpos, @rsize), wi::add (@ipos, isize)))
4284 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
4285 wi::sub (@rpos, @ipos)); }))
4286 (if (wi::geu_p (@ipos, wi::add (@rpos, @rsize))
4287 || wi::geu_p (@rpos, wi::add (@ipos, isize)))
4288 (BIT_FIELD_REF @0 @rsize @rpos)))))