]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/match.pd
Restore --enable-objc-gc support
[thirdparty/gcc.git] / gcc / match.pd
CommitLineData
3d2cf79f
RB
1/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
4
818ab71a 5 Copyright (C) 2014-2016 Free Software Foundation, Inc.
3d2cf79f
RB
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
8
9This file is part of GCC.
10
11GCC is free software; you can redistribute it and/or modify it under
12the terms of the GNU General Public License as published by the Free
13Software Foundation; either version 3, or (at your option) any later
14version.
15
16GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17WARRANTY; without even the implied warranty of MERCHANTABILITY or
18FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19for more details.
20
21You should have received a copy of the GNU General Public License
22along with GCC; see the file COPYING3. If not see
23<http://www.gnu.org/licenses/>. */
24
25
26/* Generic tree predicates we inherit. */
27(define_predicates
cc7b5acf 28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
53a19317 29 integer_each_onep integer_truep integer_nonzerop
cc7b5acf 30 real_zerop real_onep real_minus_onep
b0eb889b 31 zerop
f3582e54 32 CONSTANT_CLASS_P
887ab609 33 tree_expr_nonnegative_p
67dbe582 34 integer_valued_real_p
53a19317
RB
35 integer_pow2p
36 HONOR_NANS)
e0ee10ed 37
f84e7fd6
RB
38/* Operator lists. */
39(define_operator_list tcc_comparison
40 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
41(define_operator_list inverted_tcc_comparison
42 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
43(define_operator_list inverted_tcc_comparison_with_nans
44 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
534bd33b
MG
45(define_operator_list swapped_tcc_comparison
46 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
07cdc2b8
RB
47(define_operator_list simple_comparison lt le eq ne ge gt)
48(define_operator_list swapped_simple_comparison gt ge eq ne le lt)
49
b1dc4a20 50#include "cfn-operators.pd"
257aecb4 51
543a9bcd
RS
52/* Define operand lists for math rounding functions {,i,l,ll}FN,
53 where the versions prefixed with "i" return an int, those prefixed with
54 "l" return a long and those prefixed with "ll" return a long long.
55
56 Also define operand lists:
57
58 X<FN>F for all float functions, in the order i, l, ll
59 X<FN> for all double functions, in the same order
60 X<FN>L for all long double functions, in the same order. */
61#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
543a9bcd
RS
62 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
63 BUILT_IN_L##FN##F \
64 BUILT_IN_LL##FN##F) \
65 (define_operator_list X##FN BUILT_IN_I##FN \
66 BUILT_IN_L##FN \
67 BUILT_IN_LL##FN) \
68 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
69 BUILT_IN_L##FN##L \
70 BUILT_IN_LL##FN##L)
71
543a9bcd
RS
72DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
73DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
74DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
75DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
f84e7fd6 76
e0ee10ed 77/* Simplifications of operations with one constant operand and
36a60e48 78 simplifications to constants or single values. */
e0ee10ed
RB
79
80(for op (plus pointer_plus minus bit_ior bit_xor)
81 (simplify
82 (op @0 integer_zerop)
83 (non_lvalue @0)))
84
a499aac5
RB
85/* 0 +p index -> (type)index */
86(simplify
87 (pointer_plus integer_zerop @1)
88 (non_lvalue (convert @1)))
89
a7f24614
RB
90/* See if ARG1 is zero and X + ARG1 reduces to X.
91 Likewise if the operands are reversed. */
92(simplify
93 (plus:c @0 real_zerop@1)
94 (if (fold_real_zero_addition_p (type, @1, 0))
95 (non_lvalue @0)))
96
97/* See if ARG1 is zero and X - ARG1 reduces to X. */
98(simplify
99 (minus @0 real_zerop@1)
100 (if (fold_real_zero_addition_p (type, @1, 1))
101 (non_lvalue @0)))
102
e0ee10ed
RB
103/* Simplify x - x.
104 This is unsafe for certain floats even in non-IEEE formats.
105 In IEEE, it is unsafe because it does wrong for NaNs.
106 Also note that operand_equal_p is always false if an operand
107 is volatile. */
108(simplify
a7f24614 109 (minus @0 @0)
1b457aa4 110 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
a7f24614 111 { build_zero_cst (type); }))
e0ee10ed
RB
112
113(simplify
a7f24614
RB
114 (mult @0 integer_zerop@1)
115 @1)
116
117/* Maybe fold x * 0 to 0. The expressions aren't the same
118 when x is NaN, since x * 0 is also NaN. Nor are they the
119 same in modes with signed zeros, since multiplying a
120 negative value by 0 gives -0, not +0. */
121(simplify
122 (mult @0 real_zerop@1)
8b5ee871 123 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
a7f24614
RB
124 @1))
125
126/* In IEEE floating point, x*1 is not equivalent to x for snans.
127 Likewise for complex arithmetic with signed zeros. */
128(simplify
129 (mult @0 real_onep)
8b5ee871
MG
130 (if (!HONOR_SNANS (type)
131 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
132 || !COMPLEX_FLOAT_TYPE_P (type)))
133 (non_lvalue @0)))
134
135/* Transform x * -1.0 into -x. */
136(simplify
137 (mult @0 real_minus_onep)
8b5ee871
MG
138 (if (!HONOR_SNANS (type)
139 && (!HONOR_SIGNED_ZEROS (type)
a7f24614
RB
140 || !COMPLEX_FLOAT_TYPE_P (type)))
141 (negate @0)))
e0ee10ed
RB
142
143/* Make sure to preserve divisions by zero. This is the reason why
144 we don't simplify x / x to 1 or 0 / x to 0. */
145(for op (mult trunc_div ceil_div floor_div round_div exact_div)
146 (simplify
147 (op @0 integer_onep)
148 (non_lvalue @0)))
149
a7f24614 150(for div (trunc_div ceil_div floor_div round_div exact_div)
da186c1f 151 /* X / -1 is -X. */
a7f24614 152 (simplify
09240451
MG
153 (div @0 integer_minus_onep@1)
154 (if (!TYPE_UNSIGNED (type))
da186c1f
RB
155 (negate @0)))
156 /* X / abs (X) is X < 0 ? -1 : 1. */
157 (simplify
158 (div @0 (abs @0))
159 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
160 && TYPE_OVERFLOW_UNDEFINED (type))
161 (cond (lt @0 { build_zero_cst (type); })
162 { build_minus_one_cst (type); } { build_one_cst (type); })))
163 /* X / -X is -1. */
164 (simplify
165 (div @0 (negate @0))
166 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
167 && TYPE_OVERFLOW_UNDEFINED (type))
168 { build_minus_one_cst (type); })))
a7f24614
RB
169
170/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
171 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
172(simplify
173 (floor_div @0 @1)
09240451
MG
174 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
175 && TYPE_UNSIGNED (type))
a7f24614
RB
176 (trunc_div @0 @1)))
177
28093105
RB
178/* Combine two successive divisions. Note that combining ceil_div
179 and floor_div is trickier and combining round_div even more so. */
180(for div (trunc_div exact_div)
c306cfaf
RB
181 (simplify
182 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
183 (with {
184 bool overflow_p;
185 wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
186 }
187 (if (!overflow_p)
8fdc6c67
RB
188 (div @0 { wide_int_to_tree (type, mul); })
189 (if (TYPE_UNSIGNED (type)
190 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
191 { build_zero_cst (type); })))))
c306cfaf 192
a7f24614 193/* Optimize A / A to 1.0 if we don't care about
09240451 194 NaNs or Infinities. */
a7f24614
RB
195(simplify
196 (rdiv @0 @0)
09240451 197 (if (FLOAT_TYPE_P (type)
1b457aa4 198 && ! HONOR_NANS (type)
8b5ee871 199 && ! HONOR_INFINITIES (type))
09240451
MG
200 { build_one_cst (type); }))
201
202/* Optimize -A / A to -1.0 if we don't care about
203 NaNs or Infinities. */
204(simplify
e04d2a35 205 (rdiv:C @0 (negate @0))
09240451 206 (if (FLOAT_TYPE_P (type)
1b457aa4 207 && ! HONOR_NANS (type)
8b5ee871 208 && ! HONOR_INFINITIES (type))
09240451 209 { build_minus_one_cst (type); }))
a7f24614 210
8c6961ca
PK
211/* PR71078: x / abs(x) -> copysign (1.0, x) */
212(simplify
213 (rdiv:C (convert? @0) (convert? (abs @0)))
214 (if (SCALAR_FLOAT_TYPE_P (type)
215 && ! HONOR_NANS (type)
216 && ! HONOR_INFINITIES (type))
217 (switch
218 (if (types_match (type, float_type_node))
219 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
220 (if (types_match (type, double_type_node))
221 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
222 (if (types_match (type, long_double_type_node))
223 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
224
a7f24614
RB
225/* In IEEE floating point, x/1 is not equivalent to x for snans. */
226(simplify
227 (rdiv @0 real_onep)
8b5ee871 228 (if (!HONOR_SNANS (type))
a7f24614
RB
229 (non_lvalue @0)))
230
231/* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
232(simplify
233 (rdiv @0 real_minus_onep)
8b5ee871 234 (if (!HONOR_SNANS (type))
a7f24614
RB
235 (negate @0)))
236
5711ac88
N
237(if (flag_reciprocal_math)
238 /* Convert (A/B)/C to A/(B*C) */
239 (simplify
240 (rdiv (rdiv:s @0 @1) @2)
241 (rdiv @0 (mult @1 @2)))
242
243 /* Convert A/(B/C) to (A/B)*C */
244 (simplify
245 (rdiv @0 (rdiv:s @1 @2))
246 (mult (rdiv @0 @1) @2)))
247
248/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
249(for div (trunc_div ceil_div floor_div round_div exact_div)
250 (simplify
251 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
252 (if (integer_pow2p (@2)
253 && tree_int_cst_sgn (@2) > 0
254 && wi::add (@2, @1) == 0
255 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
256 (rshift (convert @0) { build_int_cst (integer_type_node,
257 wi::exact_log2 (@2)); }))))
258
a7f24614
RB
259/* If ARG1 is a constant, we can convert this to a multiply by the
260 reciprocal. This does not have the same rounding properties,
261 so only do this if -freciprocal-math. We can actually
262 always safely do it if ARG1 is a power of two, but it's hard to
263 tell if it is or not in a portable manner. */
264(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
265 (simplify
266 (rdiv @0 cst@1)
267 (if (optimize)
53bc4b3a
RB
268 (if (flag_reciprocal_math
269 && !real_zerop (@1))
a7f24614 270 (with
249700b5 271 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
a7f24614 272 (if (tem)
8fdc6c67
RB
273 (mult @0 { tem; } )))
274 (if (cst != COMPLEX_CST)
275 (with { tree inverse = exact_inverse (type, @1); }
276 (if (inverse)
277 (mult @0 { inverse; } ))))))))
a7f24614 278
e0ee10ed
RB
279/* Same applies to modulo operations, but fold is inconsistent here
280 and simplifies 0 % x to 0, only preserving literal 0 % 0. */
a7f24614 281(for mod (ceil_mod floor_mod round_mod trunc_mod)
e0ee10ed
RB
282 /* 0 % X is always zero. */
283 (simplify
a7f24614 284 (mod integer_zerop@0 @1)
e0ee10ed
RB
285 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
286 (if (!integer_zerop (@1))
287 @0))
288 /* X % 1 is always zero. */
289 (simplify
a7f24614
RB
290 (mod @0 integer_onep)
291 { build_zero_cst (type); })
292 /* X % -1 is zero. */
293 (simplify
09240451
MG
294 (mod @0 integer_minus_onep@1)
295 (if (!TYPE_UNSIGNED (type))
bc4315fb
MG
296 { build_zero_cst (type); }))
297 /* (X % Y) % Y is just X % Y. */
298 (simplify
299 (mod (mod@2 @0 @1) @1)
98e30e51
RB
300 @2)
301 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
302 (simplify
303 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
304 (if (ANY_INTEGRAL_TYPE_P (type)
305 && TYPE_OVERFLOW_UNDEFINED (type)
306 && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
307 { build_zero_cst (type); })))
a7f24614
RB
308
309/* X % -C is the same as X % C. */
310(simplify
311 (trunc_mod @0 INTEGER_CST@1)
312 (if (TYPE_SIGN (type) == SIGNED
313 && !TREE_OVERFLOW (@1)
314 && wi::neg_p (@1)
315 && !TYPE_OVERFLOW_TRAPS (type)
316 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
317 && !sign_bit_p (@1, @1))
318 (trunc_mod @0 (negate @1))))
e0ee10ed 319
8f0c696a
RB
320/* X % -Y is the same as X % Y. */
321(simplify
322 (trunc_mod @0 (convert? (negate @1)))
a2a743a1
MP
323 (if (INTEGRAL_TYPE_P (type)
324 && !TYPE_UNSIGNED (type)
8f0c696a 325 && !TYPE_OVERFLOW_TRAPS (type)
20b8d734
JJ
326 && tree_nop_conversion_p (type, TREE_TYPE (@1))
327 /* Avoid this transformation if X might be INT_MIN or
328 Y might be -1, because we would then change valid
329 INT_MIN % -(-1) into invalid INT_MIN % -1. */
330 && (expr_not_equal_to (@0, TYPE_MIN_VALUE (type))
331 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
332 (TREE_TYPE (@1))))))
8f0c696a
RB
333 (trunc_mod @0 (convert @1))))
334
f461569a
MP
335/* X - (X / Y) * Y is the same as X % Y. */
336(simplify
fba46f03
MG
337 (minus (convert1? @2) (convert2? (mult:c (trunc_div @0 @1) @1)))
338 /* We cannot use matching captures here, since in the case of
339 constants we really want the type of @0, not @2. */
340 (if (operand_equal_p (@0, @2, 0)
341 && (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)))
342 (convert (trunc_mod @0 @1))))
f461569a 343
8f0c696a
RB
344/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
345 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
346 Also optimize A % (C << N) where C is a power of 2,
347 to A & ((C << N) - 1). */
348(match (power_of_two_cand @1)
349 INTEGER_CST@1)
350(match (power_of_two_cand @1)
351 (lshift INTEGER_CST@1 @2))
352(for mod (trunc_mod floor_mod)
353 (simplify
4ab1e111 354 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
8f0c696a
RB
355 (if ((TYPE_UNSIGNED (type)
356 || tree_expr_nonnegative_p (@0))
4ab1e111 357 && tree_nop_conversion_p (type, TREE_TYPE (@3))
8f0c696a 358 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
4ab1e111 359 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
8f0c696a 360
887ab609
N
361/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
362(simplify
363 (trunc_div (mult @0 integer_pow2p@1) @1)
364 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
365 (bit_and @0 { wide_int_to_tree
366 (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
367 false, TYPE_PRECISION (type))); })))
368
5f8d832e
N
369/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
370(simplify
371 (mult (trunc_div @0 integer_pow2p@1) @1)
372 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
373 (bit_and @0 (negate @1))))
374
95765f36
N
375/* Simplify (t * 2) / 2) -> t. */
376(for div (trunc_div ceil_div floor_div round_div exact_div)
377 (simplify
378 (div (mult @0 @1) @1)
379 (if (ANY_INTEGRAL_TYPE_P (type)
380 && TYPE_OVERFLOW_UNDEFINED (type))
381 @0)))
382
d202f9bd 383(for op (negate abs)
9b054b08
RS
384 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
385 (for coss (COS COSH)
386 (simplify
387 (coss (op @0))
388 (coss @0)))
389 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
390 (for pows (POW)
391 (simplify
392 (pows (op @0) REAL_CST@1)
393 (with { HOST_WIDE_INT n; }
394 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
5d3498b4 395 (pows @0 @1)))))
de3fbea3
RB
396 /* Likewise for powi. */
397 (for pows (POWI)
398 (simplify
399 (pows (op @0) INTEGER_CST@1)
400 (if (wi::bit_and (@1, 1) == 0)
401 (pows @0 @1))))
5d3498b4
RS
402 /* Strip negate and abs from both operands of hypot. */
403 (for hypots (HYPOT)
404 (simplify
405 (hypots (op @0) @1)
406 (hypots @0 @1))
407 (simplify
408 (hypots @0 (op @1))
409 (hypots @0 @1)))
410 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
411 (for copysigns (COPYSIGN)
412 (simplify
413 (copysigns (op @0) @1)
414 (copysigns @0 @1))))
415
416/* abs(x)*abs(x) -> x*x. Should be valid for all types. */
417(simplify
418 (mult (abs@1 @0) @1)
419 (mult @0 @0))
420
421/* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
422(for coss (COS COSH)
423 copysigns (COPYSIGN)
424 (simplify
425 (coss (copysigns @0 @1))
426 (coss @0)))
427
428/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
429(for pows (POW)
430 copysigns (COPYSIGN)
431 (simplify
de3fbea3 432 (pows (copysigns @0 @2) REAL_CST@1)
5d3498b4
RS
433 (with { HOST_WIDE_INT n; }
434 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
435 (pows @0 @1)))))
de3fbea3
RB
436/* Likewise for powi. */
437(for pows (POWI)
438 copysigns (COPYSIGN)
439 (simplify
440 (pows (copysigns @0 @2) INTEGER_CST@1)
441 (if (wi::bit_and (@1, 1) == 0)
442 (pows @0 @1))))
5d3498b4
RS
443
444(for hypots (HYPOT)
445 copysigns (COPYSIGN)
446 /* hypot(copysign(x, y), z) -> hypot(x, z). */
447 (simplify
448 (hypots (copysigns @0 @1) @2)
449 (hypots @0 @2))
450 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
451 (simplify
452 (hypots @0 (copysigns @1 @2))
453 (hypots @0 @1)))
454
455/* copysign(copysign(x, y), z) -> copysign(x, z). */
456(for copysigns (COPYSIGN)
457 (simplify
458 (copysigns (copysigns @0 @1) @2)
459 (copysigns @0 @2)))
460
461/* copysign(x,y)*copysign(x,y) -> x*x. */
462(for copysigns (COPYSIGN)
463 (simplify
464 (mult (copysigns@2 @0 @1) @2)
465 (mult @0 @0)))
466
467/* ccos(-x) -> ccos(x). Similarly for ccosh. */
468(for ccoss (CCOS CCOSH)
469 (simplify
470 (ccoss (negate @0))
471 (ccoss @0)))
d202f9bd 472
abcc43f5
RS
473/* cabs(-x) and cos(conj(x)) -> cabs(x). */
474(for ops (conj negate)
475 (for cabss (CABS)
476 (simplify
477 (cabss (ops @0))
478 (cabss @0))))
479
0a8f32b8
RB
480/* Fold (a * (1 << b)) into (a << b) */
481(simplify
482 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
483 (if (! FLOAT_TYPE_P (type)
9ff6fb6e 484 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
0a8f32b8
RB
485 (lshift @0 @2)))
486
487/* Fold (C1/X)*C2 into (C1*C2)/X. */
488(simplify
ff86345f
RB
489 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
490 (if (flag_associative_math
491 && single_use (@3))
0a8f32b8
RB
492 (with
493 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
494 (if (tem)
495 (rdiv { tem; } @1)))))
496
5711ac88
N
497/* Convert C1/(X*C2) into (C1/C2)/X */
498(simplify
499 (rdiv REAL_CST@0 (mult @1 REAL_CST@2))
500 (if (flag_reciprocal_math)
501 (with
502 { tree tem = const_binop (RDIV_EXPR, type, @0, @2); }
503 (if (tem)
504 (rdiv { tem; } @1)))))
505
0a8f32b8
RB
506/* Simplify ~X & X as zero. */
507(simplify
508 (bit_and:c (convert? @0) (convert? (bit_not @0)))
509 { build_zero_cst (type); })
510
10158317
RB
511/* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
512(simplify
a9658b11 513 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
10158317
RB
514 (minus (bit_xor @0 @1) @1))
515(simplify
516 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
517 (if (wi::bit_not (@2) == @1)
518 (minus (bit_xor @0 @1) @1)))
519
520/* Fold (A & B) - (A & ~B) into B - (A ^ B). */
521(simplify
a8e9f9a3 522 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
10158317
RB
523 (minus @1 (bit_xor @0 @1)))
524
525/* Simplify (X & ~Y) | (~X & Y) -> X ^ Y. */
526(simplify
a9658b11 527 (bit_ior (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
10158317
RB
528 (bit_xor @0 @1))
529(simplify
530 (bit_ior:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
531 (if (wi::bit_not (@2) == @1)
532 (bit_xor @0 @1)))
d982c5b7
MG
533/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
534#if GIMPLE
535(simplify
536 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
537 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
538 && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
539 (bit_xor @0 @1)))
540#endif
10158317 541
bc4315fb
MG
542/* X % Y is smaller than Y. */
543(for cmp (lt ge)
544 (simplify
545 (cmp (trunc_mod @0 @1) @1)
546 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
547 { constant_boolean_node (cmp == LT_EXPR, type); })))
548(for cmp (gt le)
549 (simplify
550 (cmp @1 (trunc_mod @0 @1))
551 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
552 { constant_boolean_node (cmp == GT_EXPR, type); })))
553
e0ee10ed
RB
554/* x | ~0 -> ~0 */
555(simplify
ca0b7ece
RB
556 (bit_ior @0 integer_all_onesp@1)
557 @1)
558
559/* x | 0 -> x */
560(simplify
561 (bit_ior @0 integer_zerop)
562 @0)
e0ee10ed
RB
563
564/* x & 0 -> 0 */
565(simplify
ca0b7ece
RB
566 (bit_and @0 integer_zerop@1)
567 @1)
e0ee10ed 568
a4398a30 569/* ~x | x -> -1 */
8b5ee871
MG
570/* ~x ^ x -> -1 */
571/* ~x + x -> -1 */
572(for op (bit_ior bit_xor plus)
573 (simplify
574 (op:c (convert? @0) (convert? (bit_not @0)))
575 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
a4398a30 576
e0ee10ed
RB
577/* x ^ x -> 0 */
578(simplify
579 (bit_xor @0 @0)
580 { build_zero_cst (type); })
581
36a60e48
RB
582/* Canonicalize X ^ ~0 to ~X. */
583(simplify
584 (bit_xor @0 integer_all_onesp@1)
585 (bit_not @0))
586
587/* x & ~0 -> x */
588(simplify
589 (bit_and @0 integer_all_onesp)
590 (non_lvalue @0))
591
592/* x & x -> x, x | x -> x */
593(for bitop (bit_and bit_ior)
594 (simplify
595 (bitop @0 @0)
596 (non_lvalue @0)))
597
c7986356
MG
598/* x & C -> x if we know that x & ~C == 0. */
599#if GIMPLE
600(simplify
601 (bit_and SSA_NAME@0 INTEGER_CST@1)
602 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
603 && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
604 @0))
605#endif
606
0f770b01
RV
607/* x + (x & 1) -> (x + 1) & ~1 */
608(simplify
44fc0a51
RB
609 (plus:c @0 (bit_and:s @0 integer_onep@1))
610 (bit_and (plus @0 @1) (bit_not @1)))
0f770b01
RV
611
612/* x & ~(x & y) -> x & ~y */
613/* x | ~(x | y) -> x | ~y */
614(for bitop (bit_and bit_ior)
af563d4b 615 (simplify
44fc0a51
RB
616 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
617 (bitop @0 (bit_not @1))))
af563d4b
MG
618
619/* (x | y) & ~x -> y & ~x */
620/* (x & y) | ~x -> y | ~x */
621(for bitop (bit_and bit_ior)
622 rbitop (bit_ior bit_and)
623 (simplify
624 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
625 (bitop @1 @2)))
0f770b01 626
f13c4673
MP
627/* (x & y) ^ (x | y) -> x ^ y */
628(simplify
2d6f2dce
MP
629 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
630 (bit_xor @0 @1))
f13c4673 631
9ea65ca6
MP
632/* (x ^ y) ^ (x | y) -> x & y */
633(simplify
634 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
635 (bit_and @0 @1))
636
637/* (x & y) + (x ^ y) -> x | y */
638/* (x & y) | (x ^ y) -> x | y */
639/* (x & y) ^ (x ^ y) -> x | y */
640(for op (plus bit_ior bit_xor)
641 (simplify
642 (op:c (bit_and @0 @1) (bit_xor @0 @1))
643 (bit_ior @0 @1)))
644
645/* (x & y) + (x | y) -> x + y */
646(simplify
647 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
648 (plus @0 @1))
649
9737efaf
MP
650/* (x + y) - (x | y) -> x & y */
651(simplify
652 (minus (plus @0 @1) (bit_ior @0 @1))
653 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
654 && !TYPE_SATURATING (type))
655 (bit_and @0 @1)))
656
657/* (x + y) - (x & y) -> x | y */
658(simplify
659 (minus (plus @0 @1) (bit_and @0 @1))
660 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
661 && !TYPE_SATURATING (type))
662 (bit_ior @0 @1)))
663
9ea65ca6
MP
664/* (x | y) - (x ^ y) -> x & y */
665(simplify
666 (minus (bit_ior @0 @1) (bit_xor @0 @1))
667 (bit_and @0 @1))
668
669/* (x | y) - (x & y) -> x ^ y */
670(simplify
671 (minus (bit_ior @0 @1) (bit_and @0 @1))
672 (bit_xor @0 @1))
673
66cc6273
MP
674/* (x | y) & ~(x & y) -> x ^ y */
675(simplify
676 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
677 (bit_xor @0 @1))
678
679/* (x | y) & (~x ^ y) -> x & y */
680(simplify
681 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
682 (bit_and @0 @1))
683
5b00d921
RB
684/* ~x & ~y -> ~(x | y)
685 ~x | ~y -> ~(x & y) */
686(for op (bit_and bit_ior)
687 rop (bit_ior bit_and)
688 (simplify
689 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
ece46666
MG
690 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
691 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
5b00d921
RB
692 (bit_not (rop (convert @0) (convert @1))))))
693
14ea9f92 694/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
5b00d921
RB
695 with a constant, and the two constants have no bits in common,
696 we should treat this as a BIT_IOR_EXPR since this may produce more
697 simplifications. */
14ea9f92
RB
698(for op (bit_xor plus)
699 (simplify
700 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
701 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
702 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
703 && tree_nop_conversion_p (type, TREE_TYPE (@2))
704 && wi::bit_and (@1, @3) == 0)
705 (bit_ior (convert @4) (convert @5)))))
5b00d921
RB
706
707/* (X | Y) ^ X -> Y & ~ X*/
708(simplify
709 (bit_xor:c (convert? (bit_ior:c @0 @1)) (convert? @0))
710 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
711 (convert (bit_and @1 (bit_not @0)))))
712
713/* Convert ~X ^ ~Y to X ^ Y. */
714(simplify
715 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
ece46666
MG
716 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
717 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
5b00d921
RB
718 (bit_xor (convert @0) (convert @1))))
719
720/* Convert ~X ^ C to X ^ ~C. */
721(simplify
722 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
c8ba6498
EB
723 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
724 (bit_xor (convert @0) (bit_not @1))))
5b00d921 725
e39dab2c
MG
726/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
727(for opo (bit_and bit_xor)
728 opi (bit_xor bit_and)
729 (simplify
730 (opo:c (opi:c @0 @1) @1)
731 (bit_and (bit_not @0) @1)))
97e77391 732
14ea9f92
RB
733/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
734 operands are another bit-wise operation with a common input. If so,
735 distribute the bit operations to save an operation and possibly two if
736 constants are involved. For example, convert
737 (A | B) & (A | C) into A | (B & C)
738 Further simplification will occur if B and C are constants. */
e07ab2fe
MG
739(for op (bit_and bit_ior bit_xor)
740 rop (bit_ior bit_and bit_and)
14ea9f92 741 (simplify
e07ab2fe
MG
742 (op (convert? (rop:c @0 @1)) (convert? (rop:c @0 @2)))
743 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
744 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
14ea9f92
RB
745 (rop (convert @0) (op (convert @1) (convert @2))))))
746
e39dab2c
MG
747/* Some simple reassociation for bit operations, also handled in reassoc. */
748/* (X & Y) & Y -> X & Y
749 (X | Y) | Y -> X | Y */
750(for op (bit_and bit_ior)
751 (simplify
752 (op:c (convert?@2 (op:c @0 @1)) (convert? @1))
753 @2))
754/* (X ^ Y) ^ Y -> X */
755(simplify
756 (bit_xor:c (convert? (bit_xor:c @0 @1)) (convert? @1))
ece46666 757 (convert @0))
e39dab2c
MG
758/* (X & Y) & (X & Z) -> (X & Y) & Z
759 (X | Y) | (X | Z) -> (X | Y) | Z */
760(for op (bit_and bit_ior)
761 (simplify
762 (op:c (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
763 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
764 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
765 (if (single_use (@5) && single_use (@6))
766 (op @3 (convert @2))
767 (if (single_use (@3) && single_use (@4))
768 (op (convert @1) @5))))))
769/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
770(simplify
771 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
772 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
773 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
d78789f5 774 (bit_xor (convert @1) (convert @2))))
5b00d921 775
b14a9c57
RB
776(simplify
777 (abs (abs@1 @0))
778 @1)
f3582e54
RB
779(simplify
780 (abs (negate @0))
781 (abs @0))
782(simplify
783 (abs tree_expr_nonnegative_p@0)
784 @0)
785
55cf3946
RB
786/* A few cases of fold-const.c negate_expr_p predicate. */
787(match negate_expr_p
788 INTEGER_CST
b14a9c57
RB
789 (if ((INTEGRAL_TYPE_P (type)
790 && TYPE_OVERFLOW_WRAPS (type))
791 || (!TYPE_OVERFLOW_SANITIZED (type)
55cf3946
RB
792 && may_negate_without_overflow_p (t)))))
793(match negate_expr_p
794 FIXED_CST)
795(match negate_expr_p
796 (negate @0)
797 (if (!TYPE_OVERFLOW_SANITIZED (type))))
798(match negate_expr_p
799 REAL_CST
800 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
801/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
802 ways. */
803(match negate_expr_p
804 VECTOR_CST
805 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
0a8f32b8
RB
806
807/* (-A) * (-B) -> A * B */
808(simplify
809 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
810 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
811 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
812 (mult (convert @0) (convert (negate @1)))))
55cf3946
RB
813
814/* -(A + B) -> (-B) - A. */
b14a9c57 815(simplify
55cf3946
RB
816 (negate (plus:c @0 negate_expr_p@1))
817 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
818 && !HONOR_SIGNED_ZEROS (element_mode (type)))
819 (minus (negate @1) @0)))
820
821/* A - B -> A + (-B) if B is easily negatable. */
b14a9c57 822(simplify
55cf3946 823 (minus @0 negate_expr_p@1)
e4e96a4f
KT
824 (if (!FIXED_POINT_TYPE_P (type))
825 (plus @0 (negate @1))))
d4573ffe 826
5609420f
RB
827/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
828 when profitable.
829 For bitwise binary operations apply operand conversions to the
830 binary operation result instead of to the operands. This allows
831 to combine successive conversions and bitwise binary operations.
832 We combine the above two cases by using a conditional convert. */
833(for bitop (bit_and bit_ior bit_xor)
834 (simplify
835 (bitop (convert @0) (convert? @1))
836 (if (((TREE_CODE (@1) == INTEGER_CST
837 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
ad6f996c 838 && int_fits_type_p (@1, TREE_TYPE (@0)))
aea417d7 839 || types_match (@0, @1))
ad6f996c
RB
840 /* ??? This transform conflicts with fold-const.c doing
841 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
842 constants (if x has signed type, the sign bit cannot be set
843 in c). This folds extension into the BIT_AND_EXPR.
844 Restrict it to GIMPLE to avoid endless recursions. */
845 && (bitop != BIT_AND_EXPR || GIMPLE)
5609420f
RB
846 && (/* That's a good idea if the conversion widens the operand, thus
847 after hoisting the conversion the operation will be narrower. */
848 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
849 /* It's also a good idea if the conversion is to a non-integer
850 mode. */
851 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
852 /* Or if the precision of TO is not the same as the precision
853 of its mode. */
854 || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
855 (convert (bitop @0 (convert @1))))))
856
b14a9c57
RB
857(for bitop (bit_and bit_ior)
858 rbitop (bit_ior bit_and)
859 /* (x | y) & x -> x */
860 /* (x & y) | x -> x */
861 (simplify
862 (bitop:c (rbitop:c @0 @1) @0)
863 @0)
864 /* (~x | y) & x -> x & y */
865 /* (~x & y) | x -> x | y */
866 (simplify
867 (bitop:c (rbitop:c (bit_not @0) @1) @0)
868 (bitop @0 @1)))
869
5609420f
RB
870/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
871(simplify
872 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
873 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
874
875/* Combine successive equal operations with constants. */
876(for bitop (bit_and bit_ior bit_xor)
877 (simplify
878 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
879 (bitop @0 (bitop @1 @2))))
880
881/* Try simple folding for X op !X, and X op X with the help
882 of the truth_valued_p and logical_inverted_value predicates. */
883(match truth_valued_p
884 @0
885 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
f84e7fd6 886(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
5609420f
RB
887 (match truth_valued_p
888 (op @0 @1)))
889(match truth_valued_p
890 (truth_not @0))
891
0a8f32b8
RB
892(match (logical_inverted_value @0)
893 (truth_not @0))
5609420f
RB
894(match (logical_inverted_value @0)
895 (bit_not truth_valued_p@0))
896(match (logical_inverted_value @0)
09240451 897 (eq @0 integer_zerop))
5609420f 898(match (logical_inverted_value @0)
09240451 899 (ne truth_valued_p@0 integer_truep))
5609420f 900(match (logical_inverted_value @0)
09240451 901 (bit_xor truth_valued_p@0 integer_truep))
5609420f
RB
902
903/* X & !X -> 0. */
904(simplify
905 (bit_and:c @0 (logical_inverted_value @0))
906 { build_zero_cst (type); })
907/* X | !X and X ^ !X -> 1, , if X is truth-valued. */
908(for op (bit_ior bit_xor)
909 (simplify
910 (op:c truth_valued_p@0 (logical_inverted_value @0))
f84e7fd6 911 { constant_boolean_node (true, type); }))
59c20dc7
RB
912/* X ==/!= !X is false/true. */
913(for op (eq ne)
914 (simplify
915 (op:c truth_valued_p@0 (logical_inverted_value @0))
916 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
5609420f 917
5609420f
RB
918/* If arg1 and arg2 are booleans (or any single bit type)
919 then try to simplify:
920
921 (~X & Y) -> X < Y
922 (X & ~Y) -> Y < X
923 (~X | Y) -> X <= Y
924 (X | ~Y) -> Y <= X
925
926 But only do this if our result feeds into a comparison as
927 this transformation is not always a win, particularly on
928 targets with and-not instructions.
929 -> simplify_bitwise_binary_boolean */
930(simplify
931 (ne (bit_and:c (bit_not @0) @1) integer_zerop)
932 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
933 && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
0f3f9437
RB
934 (if (TYPE_UNSIGNED (TREE_TYPE (@1)))
935 (lt @0 @1)
936 (gt @0 @1))))
5609420f
RB
937(simplify
938 (ne (bit_ior:c (bit_not @0) @1) integer_zerop)
939 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
940 && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
0f3f9437
RB
941 (if (TYPE_UNSIGNED (TREE_TYPE (@1)))
942 (le @0 @1)
943 (ge @0 @1))))
5609420f 944
5609420f
RB
945/* ~~x -> x */
946(simplify
947 (bit_not (bit_not @0))
948 @0)
949
b14a9c57
RB
950/* Convert ~ (-A) to A - 1. */
951(simplify
952 (bit_not (convert? (negate @0)))
ece46666
MG
953 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
954 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
8b5ee871 955 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
b14a9c57
RB
956
957/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
958(simplify
8b5ee871 959 (bit_not (convert? (minus @0 integer_each_onep)))
ece46666
MG
960 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
961 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
962 (convert (negate @0))))
963(simplify
964 (bit_not (convert? (plus @0 integer_all_onesp)))
ece46666
MG
965 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
966 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
b14a9c57
RB
967 (convert (negate @0))))
968
969/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
970(simplify
971 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
972 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
973 (convert (bit_xor @0 (bit_not @1)))))
974(simplify
975 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
976 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
977 (convert (bit_xor @0 @1))))
978
f52baa7b
MP
979/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
980(simplify
44fc0a51
RB
981 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
982 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
f52baa7b 983
f7b7b0aa
MP
984/* Fold A - (A & B) into ~B & A. */
985(simplify
986 (minus (convert? @0) (convert?:s (bit_and:cs @0 @1)))
987 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
988 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
989 (convert (bit_and (bit_not @1) @0))))
5609420f 990
84ff66b8
AV
991
992
993/* ((X inner_op C0) outer_op C1)
994 With X being a tree where value_range has reasoned certain bits to always be
995 zero throughout its computed value range,
996 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
997 where zero_mask has 1's for all bits that are sure to be 0 in
998 and 0's otherwise.
999 if (inner_op == '^') C0 &= ~C1;
1000 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)
1001 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)
1002*/
1003(for inner_op (bit_ior bit_xor)
1004 outer_op (bit_xor bit_ior)
1005(simplify
1006 (outer_op
1007 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1008 (with
1009 {
1010 bool fail = false;
1011 wide_int zero_mask_not;
1012 wide_int C0;
1013 wide_int cst_emit;
1014
1015 if (TREE_CODE (@2) == SSA_NAME)
1016 zero_mask_not = get_nonzero_bits (@2);
1017 else
1018 fail = true;
1019
1020 if (inner_op == BIT_XOR_EXPR)
1021 {
1022 C0 = wi::bit_and_not (@0, @1);
1023 cst_emit = wi::bit_or (C0, @1);
1024 }
1025 else
1026 {
1027 C0 = @0;
1028 cst_emit = wi::bit_xor (@0, @1);
1029 }
1030 }
1031 (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
1032 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1033 (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
1034 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
1035
a499aac5
RB
1036/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1037(simplify
44fc0a51
RB
1038 (pointer_plus (pointer_plus:s @0 @1) @3)
1039 (pointer_plus @0 (plus @1 @3)))
a499aac5
RB
1040
1041/* Pattern match
1042 tem1 = (long) ptr1;
1043 tem2 = (long) ptr2;
1044 tem3 = tem2 - tem1;
1045 tem4 = (unsigned long) tem3;
1046 tem5 = ptr1 + tem4;
1047 and produce
1048 tem5 = ptr2; */
1049(simplify
1050 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1051 /* Conditionally look through a sign-changing conversion. */
1052 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1053 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1054 || (GENERIC && type == TREE_TYPE (@1))))
1055 @1))
1056
1057/* Pattern match
1058 tem = (sizetype) ptr;
1059 tem = tem & algn;
1060 tem = -tem;
1061 ... = ptr p+ tem;
1062 and produce the simpler and easier to analyze with respect to alignment
1063 ... = ptr & ~algn; */
1064(simplify
1065 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1066 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
1067 (bit_and @0 { algn; })))
1068
99e943a2
RB
1069/* Try folding difference of addresses. */
1070(simplify
1071 (minus (convert ADDR_EXPR@0) (convert @1))
1072 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1073 (with { HOST_WIDE_INT diff; }
1074 (if (ptr_difference_const (@0, @1, &diff))
1075 { build_int_cst_type (type, diff); }))))
1076(simplify
1077 (minus (convert @0) (convert ADDR_EXPR@1))
1078 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1079 (with { HOST_WIDE_INT diff; }
1080 (if (ptr_difference_const (@0, @1, &diff))
1081 { build_int_cst_type (type, diff); }))))
1082
bab73f11
RB
1083/* If arg0 is derived from the address of an object or function, we may
1084 be able to fold this expression using the object or function's
1085 alignment. */
1086(simplify
1087 (bit_and (convert? @0) INTEGER_CST@1)
1088 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1089 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1090 (with
1091 {
1092 unsigned int align;
1093 unsigned HOST_WIDE_INT bitpos;
1094 get_pointer_alignment_1 (@0, &align, &bitpos);
1095 }
1096 (if (wi::ltu_p (@1, align / BITS_PER_UNIT))
1097 { wide_int_to_tree (type, wi::bit_and (@1, bitpos / BITS_PER_UNIT)); }))))
99e943a2 1098
a499aac5 1099
cc7b5acf
RB
1100/* We can't reassociate at all for saturating types. */
1101(if (!TYPE_SATURATING (type))
1102
1103 /* Contract negates. */
1104 /* A + (-B) -> A - B */
1105 (simplify
1106 (plus:c (convert1? @0) (convert2? (negate @1)))
1107 /* Apply STRIP_NOPS on @0 and the negate. */
1108 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1109 && tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1110 && !TYPE_OVERFLOW_SANITIZED (type))
cc7b5acf
RB
1111 (minus (convert @0) (convert @1))))
1112 /* A - (-B) -> A + B */
1113 (simplify
1114 (minus (convert1? @0) (convert2? (negate @1)))
1115 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
2f68e8bc 1116 && tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1117 && !TYPE_OVERFLOW_SANITIZED (type))
cc7b5acf
RB
1118 (plus (convert @0) (convert @1))))
1119 /* -(-A) -> A */
1120 (simplify
1121 (negate (convert? (negate @1)))
1122 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
6a4f0678 1123 && !TYPE_OVERFLOW_SANITIZED (type))
a0f12cf8 1124 (convert @1)))
cc7b5acf 1125
7318e44f
RB
1126 /* We can't reassociate floating-point unless -fassociative-math
1127 or fixed-point plus or minus because of saturation to +-Inf. */
1128 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1129 && !FIXED_POINT_TYPE_P (type))
cc7b5acf
RB
1130
1131 /* Match patterns that allow contracting a plus-minus pair
1132 irrespective of overflow issues. */
1133 /* (A +- B) - A -> +- B */
1134 /* (A +- B) -+ B -> A */
1135 /* A - (A +- B) -> -+ B */
1136 /* A +- (B -+ A) -> +- B */
1137 (simplify
1138 (minus (plus:c @0 @1) @0)
1139 @1)
1140 (simplify
1141 (minus (minus @0 @1) @0)
1142 (negate @1))
1143 (simplify
1144 (plus:c (minus @0 @1) @1)
1145 @0)
1146 (simplify
1147 (minus @0 (plus:c @0 @1))
1148 (negate @1))
1149 (simplify
1150 (minus @0 (minus @0 @1))
1151 @1)
1152
1153 /* (A +- CST) +- CST -> A + CST */
1154 (for outer_op (plus minus)
1155 (for inner_op (plus minus)
1156 (simplify
1157 (outer_op (inner_op @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1158 /* If the constant operation overflows we cannot do the transform
1159 as we would introduce undefined overflow, for example
1160 with (a - 1) + INT_MIN. */
23f27839 1161 (with { tree cst = const_binop (outer_op == inner_op
cc7b5acf
RB
1162 ? PLUS_EXPR : MINUS_EXPR, type, @1, @2); }
1163 (if (cst && !TREE_OVERFLOW (cst))
1164 (inner_op @0 { cst; } ))))))
1165
1166 /* (CST - A) +- CST -> CST - A */
1167 (for outer_op (plus minus)
1168 (simplify
1169 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
23f27839 1170 (with { tree cst = const_binop (outer_op, type, @1, @2); }
cc7b5acf
RB
1171 (if (cst && !TREE_OVERFLOW (cst))
1172 (minus { cst; } @0)))))
1173
1174 /* ~A + A -> -1 */
1175 (simplify
1176 (plus:c (bit_not @0) @0)
1177 (if (!TYPE_OVERFLOW_TRAPS (type))
1178 { build_all_ones_cst (type); }))
1179
1180 /* ~A + 1 -> -A */
1181 (simplify
e19740ae
RB
1182 (plus (convert? (bit_not @0)) integer_each_onep)
1183 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1184 (negate (convert @0))))
1185
1186 /* -A - 1 -> ~A */
1187 (simplify
1188 (minus (convert? (negate @0)) integer_each_onep)
1189 (if (!TYPE_OVERFLOW_TRAPS (type)
1190 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1191 (bit_not (convert @0))))
1192
1193 /* -1 - A -> ~A */
1194 (simplify
1195 (minus integer_all_onesp @0)
bc4315fb 1196 (bit_not @0))
cc7b5acf
RB
1197
1198 /* (T)(P + A) - (T)P -> (T) A */
1199 (for add (plus pointer_plus)
1200 (simplify
1201 (minus (convert (add @0 @1))
1202 (convert @0))
09240451 1203 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
cc7b5acf
RB
1204 /* For integer types, if A has a smaller type
1205 than T the result depends on the possible
1206 overflow in P + A.
1207 E.g. T=size_t, A=(unsigned)429497295, P>0.
1208 However, if an overflow in P + A would cause
1209 undefined behavior, we can assume that there
1210 is no overflow. */
1211 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1212 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1213 /* For pointer types, if the conversion of A to the
1214 final type requires a sign- or zero-extension,
1215 then we have to punt - it is not defined which
1216 one is correct. */
1217 || (POINTER_TYPE_P (TREE_TYPE (@0))
1218 && TREE_CODE (@1) == INTEGER_CST
1219 && tree_int_cst_sign_bit (@1) == 0))
a8fc2579
RB
1220 (convert @1))))
1221
1222 /* (T)P - (T)(P + A) -> -(T) A */
1223 (for add (plus pointer_plus)
1224 (simplify
1225 (minus (convert @0)
1226 (convert (add @0 @1)))
1227 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1228 /* For integer types, if A has a smaller type
1229 than T the result depends on the possible
1230 overflow in P + A.
1231 E.g. T=size_t, A=(unsigned)429497295, P>0.
1232 However, if an overflow in P + A would cause
1233 undefined behavior, we can assume that there
1234 is no overflow. */
1235 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1236 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1237 /* For pointer types, if the conversion of A to the
1238 final type requires a sign- or zero-extension,
1239 then we have to punt - it is not defined which
1240 one is correct. */
1241 || (POINTER_TYPE_P (TREE_TYPE (@0))
1242 && TREE_CODE (@1) == INTEGER_CST
1243 && tree_int_cst_sign_bit (@1) == 0))
1244 (negate (convert @1)))))
1245
1246 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
1247 (for add (plus pointer_plus)
1248 (simplify
1249 (minus (convert (add @0 @1))
1250 (convert (add @0 @2)))
1251 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1252 /* For integer types, if A has a smaller type
1253 than T the result depends on the possible
1254 overflow in P + A.
1255 E.g. T=size_t, A=(unsigned)429497295, P>0.
1256 However, if an overflow in P + A would cause
1257 undefined behavior, we can assume that there
1258 is no overflow. */
1259 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1260 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1261 /* For pointer types, if the conversion of A to the
1262 final type requires a sign- or zero-extension,
1263 then we have to punt - it is not defined which
1264 one is correct. */
1265 || (POINTER_TYPE_P (TREE_TYPE (@0))
1266 && TREE_CODE (@1) == INTEGER_CST
1267 && tree_int_cst_sign_bit (@1) == 0
1268 && TREE_CODE (@2) == INTEGER_CST
1269 && tree_int_cst_sign_bit (@2) == 0))
1270 (minus (convert @1) (convert @2)))))))
cc7b5acf
RB
1271
1272
0122e8e5 1273/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
a7f24614 1274
0122e8e5 1275(for minmax (min max FMIN FMAX)
a7f24614
RB
1276 (simplify
1277 (minmax @0 @0)
1278 @0))
4a334cba
RS
1279/* min(max(x,y),y) -> y. */
1280(simplify
1281 (min:c (max:c @0 @1) @1)
1282 @1)
1283/* max(min(x,y),y) -> y. */
1284(simplify
1285 (max:c (min:c @0 @1) @1)
1286 @1)
d657e995
RB
1287/* max(a,-a) -> abs(a). */
1288(simplify
1289 (max:c @0 (negate @0))
1290 (if (TREE_CODE (type) != COMPLEX_TYPE
1291 && (! ANY_INTEGRAL_TYPE_P (type)
1292 || TYPE_OVERFLOW_UNDEFINED (type)))
1293 (abs @0)))
a7f24614
RB
1294(simplify
1295 (min @0 @1)
2c2870a1
MG
1296 (switch
1297 (if (INTEGRAL_TYPE_P (type)
1298 && TYPE_MIN_VALUE (type)
1299 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1300 @1)
1301 (if (INTEGRAL_TYPE_P (type)
1302 && TYPE_MAX_VALUE (type)
1303 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1304 @0)))
a7f24614
RB
1305(simplify
1306 (max @0 @1)
2c2870a1
MG
1307 (switch
1308 (if (INTEGRAL_TYPE_P (type)
1309 && TYPE_MAX_VALUE (type)
1310 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1311 @1)
1312 (if (INTEGRAL_TYPE_P (type)
1313 && TYPE_MIN_VALUE (type)
1314 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1315 @0)))
0122e8e5
RS
1316(for minmax (FMIN FMAX)
1317 /* If either argument is NaN, return the other one. Avoid the
1318 transformation if we get (and honor) a signalling NaN. */
1319 (simplify
1320 (minmax:c @0 REAL_CST@1)
1321 (if (real_isnan (TREE_REAL_CST_PTR (@1))
1322 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
1323 @0)))
1324/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
1325 functions to return the numeric arg if the other one is NaN.
1326 MIN and MAX don't honor that, so only transform if -ffinite-math-only
1327 is set. C99 doesn't require -0.0 to be handled, so we don't have to
1328 worry about it either. */
1329(if (flag_finite_math_only)
1330 (simplify
1331 (FMIN @0 @1)
1332 (min @0 @1))
1333 (simplify
1334 (FMAX @0 @1)
1335 (max @0 @1)))
ce0e66ff
MG
1336/* min (-A, -B) -> -max (A, B) */
1337(for minmax (min max FMIN FMAX)
1338 maxmin (max min FMAX FMIN)
1339 (simplify
1340 (minmax (negate:s@2 @0) (negate:s@3 @1))
1341 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
1342 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1343 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
1344 (negate (maxmin @0 @1)))))
1345/* MIN (~X, ~Y) -> ~MAX (X, Y)
1346 MAX (~X, ~Y) -> ~MIN (X, Y) */
1347(for minmax (min max)
1348 maxmin (max min)
1349 (simplify
1350 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
1351 (bit_not (maxmin @0 @1))))
a7f24614 1352
b4817bd6
MG
1353/* MIN (X, Y) == X -> X <= Y */
1354(for minmax (min min max max)
1355 cmp (eq ne eq ne )
1356 out (le gt ge lt )
1357 (simplify
1358 (cmp:c (minmax:c @0 @1) @0)
1359 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
1360 (out @0 @1))))
1361/* MIN (X, 5) == 0 -> X == 0
1362 MIN (X, 5) == 7 -> false */
1363(for cmp (eq ne)
1364 (simplify
1365 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
1366 (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1367 { constant_boolean_node (cmp == NE_EXPR, type); }
1368 (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1369 (cmp @0 @2)))))
1370(for cmp (eq ne)
1371 (simplify
1372 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
1373 (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1374 { constant_boolean_node (cmp == NE_EXPR, type); }
1375 (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1376 (cmp @0 @2)))))
1377/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
1378(for minmax (min min max max min min max max )
1379 cmp (lt le gt ge gt ge lt le )
1380 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
1381 (simplify
1382 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
1383 (comb (cmp @0 @2) (cmp @1 @2))))
1384
a7f24614
RB
1385/* Simplifications of shift and rotates. */
1386
1387(for rotate (lrotate rrotate)
1388 (simplify
1389 (rotate integer_all_onesp@0 @1)
1390 @0))
1391
1392/* Optimize -1 >> x for arithmetic right shifts. */
1393(simplify
1394 (rshift integer_all_onesp@0 @1)
1395 (if (!TYPE_UNSIGNED (type)
1396 && tree_expr_nonnegative_p (@1))
1397 @0))
1398
12085390
N
1399/* Optimize (x >> c) << c into x & (-1<<c). */
1400(simplify
1401 (lshift (rshift @0 INTEGER_CST@1) @1)
1402 (if (wi::ltu_p (@1, element_precision (type)))
1403 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
1404
1405/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
1406 types. */
1407(simplify
1408 (rshift (lshift @0 INTEGER_CST@1) @1)
1409 (if (TYPE_UNSIGNED (type)
1410 && (wi::ltu_p (@1, element_precision (type))))
1411 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
1412
a7f24614
RB
1413(for shiftrotate (lrotate rrotate lshift rshift)
1414 (simplify
1415 (shiftrotate @0 integer_zerop)
1416 (non_lvalue @0))
1417 (simplify
1418 (shiftrotate integer_zerop@0 @1)
1419 @0)
1420 /* Prefer vector1 << scalar to vector1 << vector2
1421 if vector2 is uniform. */
1422 (for vec (VECTOR_CST CONSTRUCTOR)
1423 (simplify
1424 (shiftrotate @0 vec@1)
1425 (with { tree tem = uniform_vector_p (@1); }
1426 (if (tem)
1427 (shiftrotate @0 { tem; }))))))
1428
1429/* Rewrite an LROTATE_EXPR by a constant into an
1430 RROTATE_EXPR by a new constant. */
1431(simplify
1432 (lrotate @0 INTEGER_CST@1)
23f27839 1433 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
a7f24614
RB
1434 build_int_cst (TREE_TYPE (@1),
1435 element_precision (type)), @1); }))
1436
14ea9f92
RB
1437/* Turn (a OP c1) OP c2 into a OP (c1+c2). */
1438(for op (lrotate rrotate rshift lshift)
1439 (simplify
1440 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
1441 (with { unsigned int prec = element_precision (type); }
1442 (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
1443 && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
1444 && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
1445 && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
1446 (with { unsigned int low = wi::add (@1, @2).to_uhwi (); }
1447 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
1448 being well defined. */
1449 (if (low >= prec)
1450 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
8fdc6c67 1451 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
50301115 1452 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
8fdc6c67
RB
1453 { build_zero_cst (type); }
1454 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
1455 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
14ea9f92
RB
1456
1457
01ada710
MP
1458/* ((1 << A) & 1) != 0 -> A == 0
1459 ((1 << A) & 1) == 0 -> A != 0 */
1460(for cmp (ne eq)
1461 icmp (eq ne)
1462 (simplify
1463 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
1464 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
cc7b5acf 1465
f2e609c3
MP
1466/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
1467 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
1468 if CST2 != 0. */
1469(for cmp (ne eq)
1470 (simplify
1471 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
1472 (with { int cand = wi::ctz (@2) - wi::ctz (@0); }
1473 (if (cand < 0
1474 || (!integer_zerop (@2)
1475 && wi::ne_p (wi::lshift (@0, cand), @2)))
8fdc6c67
RB
1476 { constant_boolean_node (cmp == NE_EXPR, type); }
1477 (if (!integer_zerop (@2)
1478 && wi::eq_p (wi::lshift (@0, cand), @2))
1479 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
f2e609c3 1480
1ffbaa3f
RB
1481/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
1482 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
1483 if the new mask might be further optimized. */
1484(for shift (lshift rshift)
1485 (simplify
44fc0a51
RB
1486 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
1487 INTEGER_CST@2)
1ffbaa3f
RB
1488 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
1489 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
1490 && tree_fits_uhwi_p (@1)
1491 && tree_to_uhwi (@1) > 0
1492 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
1493 (with
1494 {
1495 unsigned int shiftc = tree_to_uhwi (@1);
1496 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
1497 unsigned HOST_WIDE_INT newmask, zerobits = 0;
1498 tree shift_type = TREE_TYPE (@3);
1499 unsigned int prec;
1500
1501 if (shift == LSHIFT_EXPR)
fecfbfa4 1502 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
1ffbaa3f
RB
1503 else if (shift == RSHIFT_EXPR
1504 && (TYPE_PRECISION (shift_type)
1505 == GET_MODE_PRECISION (TYPE_MODE (shift_type))))
1506 {
1507 prec = TYPE_PRECISION (TREE_TYPE (@3));
1508 tree arg00 = @0;
1509 /* See if more bits can be proven as zero because of
1510 zero extension. */
1511 if (@3 != @0
1512 && TYPE_UNSIGNED (TREE_TYPE (@0)))
1513 {
1514 tree inner_type = TREE_TYPE (@0);
1515 if ((TYPE_PRECISION (inner_type)
1516 == GET_MODE_PRECISION (TYPE_MODE (inner_type)))
1517 && TYPE_PRECISION (inner_type) < prec)
1518 {
1519 prec = TYPE_PRECISION (inner_type);
1520 /* See if we can shorten the right shift. */
1521 if (shiftc < prec)
1522 shift_type = inner_type;
1523 /* Otherwise X >> C1 is all zeros, so we'll optimize
1524 it into (X, 0) later on by making sure zerobits
1525 is all ones. */
1526 }
1527 }
dd4786fe 1528 zerobits = HOST_WIDE_INT_M1U;
1ffbaa3f
RB
1529 if (shiftc < prec)
1530 {
1531 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
1532 zerobits <<= prec - shiftc;
1533 }
1534 /* For arithmetic shift if sign bit could be set, zerobits
1535 can contain actually sign bits, so no transformation is
1536 possible, unless MASK masks them all away. In that
1537 case the shift needs to be converted into logical shift. */
1538 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
1539 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
1540 {
1541 if ((mask & zerobits) == 0)
1542 shift_type = unsigned_type_for (TREE_TYPE (@3));
1543 else
1544 zerobits = 0;
1545 }
1546 }
1547 }
1548 /* ((X << 16) & 0xff00) is (X, 0). */
1549 (if ((mask & zerobits) == mask)
8fdc6c67
RB
1550 { build_int_cst (type, 0); }
1551 (with { newmask = mask | zerobits; }
1552 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
1553 (with
1554 {
1555 /* Only do the transformation if NEWMASK is some integer
1556 mode's mask. */
1557 for (prec = BITS_PER_UNIT;
1558 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
fecfbfa4 1559 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
8fdc6c67
RB
1560 break;
1561 }
1562 (if (prec < HOST_BITS_PER_WIDE_INT
dd4786fe 1563 || newmask == HOST_WIDE_INT_M1U)
8fdc6c67
RB
1564 (with
1565 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
1566 (if (!tree_int_cst_equal (newmaskt, @2))
1567 (if (shift_type != TREE_TYPE (@3))
1568 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
1569 (bit_and @4 { newmaskt; })))))))))))))
1ffbaa3f 1570
84ff66b8
AV
1571/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
1572 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
98e30e51 1573(for shift (lshift rshift)
84ff66b8
AV
1574 (for bit_op (bit_and bit_xor bit_ior)
1575 (simplify
1576 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
1577 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1578 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
1579 (bit_op (shift (convert @0) @1) { mask; }))))))
98e30e51 1580
ad1d92ab
MM
1581/* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
1582(simplify
1583 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
1584 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
ece46666
MG
1585 && (element_precision (TREE_TYPE (@0))
1586 <= element_precision (TREE_TYPE (@1))
1587 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
ad1d92ab
MM
1588 (with
1589 { tree shift_type = TREE_TYPE (@0); }
1590 (convert (rshift (convert:shift_type @1) @2)))))
1591
1592/* ~(~X >>r Y) -> X >>r Y
1593 ~(~X <<r Y) -> X <<r Y */
1594(for rotate (lrotate rrotate)
1595 (simplify
1596 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
ece46666
MG
1597 (if ((element_precision (TREE_TYPE (@0))
1598 <= element_precision (TREE_TYPE (@1))
1599 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
1600 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
1601 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
ad1d92ab
MM
1602 (with
1603 { tree rotate_type = TREE_TYPE (@0); }
1604 (convert (rotate (convert:rotate_type @1) @2))))))
98e30e51 1605
d4573ffe
RB
1606/* Simplifications of conversions. */
1607
1608/* Basic strip-useless-type-conversions / strip_nops. */
f3582e54 1609(for cvt (convert view_convert float fix_trunc)
d4573ffe
RB
1610 (simplify
1611 (cvt @0)
1612 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
1613 || (GENERIC && type == TREE_TYPE (@0)))
1614 @0)))
1615
1616/* Contract view-conversions. */
1617(simplify
1618 (view_convert (view_convert @0))
1619 (view_convert @0))
1620
1621/* For integral conversions with the same precision or pointer
1622 conversions use a NOP_EXPR instead. */
1623(simplify
1624 (view_convert @0)
1625 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
1626 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
1627 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
1628 (convert @0)))
1629
1630/* Strip inner integral conversions that do not change precision or size. */
1631(simplify
1632 (view_convert (convert@0 @1))
1633 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
1634 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
1635 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
1636 && (TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))))
1637 (view_convert @1)))
1638
1639/* Re-association barriers around constants and other re-association
1640 barriers can be removed. */
1641(simplify
1642 (paren CONSTANT_CLASS_P@0)
1643 @0)
1644(simplify
1645 (paren (paren@1 @0))
1646 @1)
1e51d0a2
RB
1647
1648/* Handle cases of two conversions in a row. */
1649(for ocvt (convert float fix_trunc)
1650 (for icvt (convert float)
1651 (simplify
1652 (ocvt (icvt@1 @0))
1653 (with
1654 {
1655 tree inside_type = TREE_TYPE (@0);
1656 tree inter_type = TREE_TYPE (@1);
1657 int inside_int = INTEGRAL_TYPE_P (inside_type);
1658 int inside_ptr = POINTER_TYPE_P (inside_type);
1659 int inside_float = FLOAT_TYPE_P (inside_type);
09240451 1660 int inside_vec = VECTOR_TYPE_P (inside_type);
1e51d0a2
RB
1661 unsigned int inside_prec = TYPE_PRECISION (inside_type);
1662 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
1663 int inter_int = INTEGRAL_TYPE_P (inter_type);
1664 int inter_ptr = POINTER_TYPE_P (inter_type);
1665 int inter_float = FLOAT_TYPE_P (inter_type);
09240451 1666 int inter_vec = VECTOR_TYPE_P (inter_type);
1e51d0a2
RB
1667 unsigned int inter_prec = TYPE_PRECISION (inter_type);
1668 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
1669 int final_int = INTEGRAL_TYPE_P (type);
1670 int final_ptr = POINTER_TYPE_P (type);
1671 int final_float = FLOAT_TYPE_P (type);
09240451 1672 int final_vec = VECTOR_TYPE_P (type);
1e51d0a2
RB
1673 unsigned int final_prec = TYPE_PRECISION (type);
1674 int final_unsignedp = TYPE_UNSIGNED (type);
1675 }
64d3a1f0
RB
1676 (switch
1677 /* In addition to the cases of two conversions in a row
1678 handled below, if we are converting something to its own
1679 type via an object of identical or wider precision, neither
1680 conversion is needed. */
1681 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
1682 || (GENERIC
1683 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
1684 && (((inter_int || inter_ptr) && final_int)
1685 || (inter_float && final_float))
1686 && inter_prec >= final_prec)
1687 (ocvt @0))
1688
1689 /* Likewise, if the intermediate and initial types are either both
1690 float or both integer, we don't need the middle conversion if the
1691 former is wider than the latter and doesn't change the signedness
1692 (for integers). Avoid this if the final type is a pointer since
36088299 1693 then we sometimes need the middle conversion. */
64d3a1f0
RB
1694 (if (((inter_int && inside_int) || (inter_float && inside_float))
1695 && (final_int || final_float)
1696 && inter_prec >= inside_prec
36088299 1697 && (inter_float || inter_unsignedp == inside_unsignedp))
64d3a1f0
RB
1698 (ocvt @0))
1699
1700 /* If we have a sign-extension of a zero-extended value, we can
1701 replace that by a single zero-extension. Likewise if the
1702 final conversion does not change precision we can drop the
1703 intermediate conversion. */
1704 (if (inside_int && inter_int && final_int
1705 && ((inside_prec < inter_prec && inter_prec < final_prec
1706 && inside_unsignedp && !inter_unsignedp)
1707 || final_prec == inter_prec))
1708 (ocvt @0))
1709
1710 /* Two conversions in a row are not needed unless:
1e51d0a2
RB
1711 - some conversion is floating-point (overstrict for now), or
1712 - some conversion is a vector (overstrict for now), or
1713 - the intermediate type is narrower than both initial and
1714 final, or
1715 - the intermediate type and innermost type differ in signedness,
1716 and the outermost type is wider than the intermediate, or
1717 - the initial type is a pointer type and the precisions of the
1718 intermediate and final types differ, or
1719 - the final type is a pointer type and the precisions of the
1720 initial and intermediate types differ. */
64d3a1f0
RB
1721 (if (! inside_float && ! inter_float && ! final_float
1722 && ! inside_vec && ! inter_vec && ! final_vec
1723 && (inter_prec >= inside_prec || inter_prec >= final_prec)
1724 && ! (inside_int && inter_int
1725 && inter_unsignedp != inside_unsignedp
1726 && inter_prec < final_prec)
1727 && ((inter_unsignedp && inter_prec > inside_prec)
1728 == (final_unsignedp && final_prec > inter_prec))
1729 && ! (inside_ptr && inter_prec != final_prec)
36088299 1730 && ! (final_ptr && inside_prec != inter_prec))
64d3a1f0
RB
1731 (ocvt @0))
1732
1733 /* A truncation to an unsigned type (a zero-extension) should be
1734 canonicalized as bitwise and of a mask. */
1d510e04
JJ
1735 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
1736 && final_int && inter_int && inside_int
64d3a1f0
RB
1737 && final_prec == inside_prec
1738 && final_prec > inter_prec
1739 && inter_unsignedp)
1740 (convert (bit_and @0 { wide_int_to_tree
1741 (inside_type,
1742 wi::mask (inter_prec, false,
1743 TYPE_PRECISION (inside_type))); })))
1744
1745 /* If we are converting an integer to a floating-point that can
1746 represent it exactly and back to an integer, we can skip the
1747 floating-point conversion. */
1748 (if (GIMPLE /* PR66211 */
1749 && inside_int && inter_float && final_int &&
1750 (unsigned) significand_size (TYPE_MODE (inter_type))
1751 >= inside_prec - !inside_unsignedp)
1752 (convert @0)))))))
ea2042ba
RB
1753
1754/* If we have a narrowing conversion to an integral type that is fed by a
1755 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
1756 masks off bits outside the final type (and nothing else). */
1757(simplify
1758 (convert (bit_and @0 INTEGER_CST@1))
1759 (if (INTEGRAL_TYPE_P (type)
1760 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1761 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
1762 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
1763 TYPE_PRECISION (type)), 0))
1764 (convert @0)))
a25454ea
RB
1765
1766
1767/* (X /[ex] A) * A -> X. */
1768(simplify
1769 (mult (convert? (exact_div @0 @1)) @1)
1770 /* Look through a sign-changing conversion. */
257b01ba 1771 (convert @0))
eaeba53a 1772
a7f24614
RB
1773/* Canonicalization of binary operations. */
1774
1775/* Convert X + -C into X - C. */
1776(simplify
1777 (plus @0 REAL_CST@1)
1778 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
23f27839 1779 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
a7f24614
RB
1780 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
1781 (minus @0 { tem; })))))
1782
6b6aa8d3 1783/* Convert x+x into x*2. */
a7f24614
RB
1784(simplify
1785 (plus @0 @0)
1786 (if (SCALAR_FLOAT_TYPE_P (type))
6b6aa8d3
MG
1787 (mult @0 { build_real (type, dconst2); })
1788 (if (INTEGRAL_TYPE_P (type))
1789 (mult @0 { build_int_cst (type, 2); }))))
a7f24614
RB
1790
1791(simplify
1792 (minus integer_zerop @1)
1793 (negate @1))
1794
1795/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
1796 ARG0 is zero and X + ARG0 reduces to X, since that would mean
1797 (-ARG1 + ARG0) reduces to -ARG1. */
1798(simplify
1799 (minus real_zerop@0 @1)
1800 (if (fold_real_zero_addition_p (type, @0, 0))
1801 (negate @1)))
1802
1803/* Transform x * -1 into -x. */
1804(simplify
1805 (mult @0 integer_minus_onep)
1806 (negate @0))
eaeba53a 1807
96285749
RS
1808/* True if we can easily extract the real and imaginary parts of a complex
1809 number. */
1810(match compositional_complex
1811 (convert? (complex @0 @1)))
1812
eaeba53a
RB
1813/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
1814(simplify
1815 (complex (realpart @0) (imagpart @0))
1816 @0)
1817(simplify
1818 (realpart (complex @0 @1))
1819 @0)
1820(simplify
1821 (imagpart (complex @0 @1))
1822 @1)
83633539 1823
77c028c5
MG
1824/* Sometimes we only care about half of a complex expression. */
1825(simplify
1826 (realpart (convert?:s (conj:s @0)))
1827 (convert (realpart @0)))
1828(simplify
1829 (imagpart (convert?:s (conj:s @0)))
1830 (convert (negate (imagpart @0))))
1831(for part (realpart imagpart)
1832 (for op (plus minus)
1833 (simplify
1834 (part (convert?:s@2 (op:s @0 @1)))
1835 (convert (op (part @0) (part @1))))))
1836(simplify
1837 (realpart (convert?:s (CEXPI:s @0)))
1838 (convert (COS @0)))
1839(simplify
1840 (imagpart (convert?:s (CEXPI:s @0)))
1841 (convert (SIN @0)))
1842
1843/* conj(conj(x)) -> x */
1844(simplify
1845 (conj (convert? (conj @0)))
1846 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
1847 (convert @0)))
1848
1849/* conj({x,y}) -> {x,-y} */
1850(simplify
1851 (conj (convert?:s (complex:s @0 @1)))
1852 (with { tree itype = TREE_TYPE (type); }
1853 (complex (convert:itype @0) (negate (convert:itype @1)))))
83633539
RB
1854
1855/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
1856(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
1857 (simplify
1858 (bswap (bswap @0))
1859 @0)
1860 (simplify
1861 (bswap (bit_not (bswap @0)))
1862 (bit_not @0))
1863 (for bitop (bit_xor bit_ior bit_and)
1864 (simplify
1865 (bswap (bitop:c (bswap @0) @1))
1866 (bitop @0 (bswap @1)))))
96994de0
RB
1867
1868
1869/* Combine COND_EXPRs and VEC_COND_EXPRs. */
1870
1871/* Simplify constant conditions.
1872 Only optimize constant conditions when the selected branch
1873 has the same type as the COND_EXPR. This avoids optimizing
1874 away "c ? x : throw", where the throw has a void type.
1875 Note that we cannot throw away the fold-const.c variant nor
1876 this one as we depend on doing this transform before possibly
1877 A ? B : B -> B triggers and the fold-const.c one can optimize
1878 0 ? A : B to B even if A has side-effects. Something
1879 genmatch cannot handle. */
1880(simplify
1881 (cond INTEGER_CST@0 @1 @2)
8fdc6c67
RB
1882 (if (integer_zerop (@0))
1883 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
1884 @2)
1885 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
1886 @1)))
96994de0
RB
1887(simplify
1888 (vec_cond VECTOR_CST@0 @1 @2)
1889 (if (integer_all_onesp (@0))
8fdc6c67
RB
1890 @1
1891 (if (integer_zerop (@0))
1892 @2)))
96994de0
RB
1893
1894(for cnd (cond vec_cond)
1895 /* A ? B : (A ? X : C) -> A ? B : C. */
1896 (simplify
1897 (cnd @0 (cnd @0 @1 @2) @3)
1898 (cnd @0 @1 @3))
1899 (simplify
1900 (cnd @0 @1 (cnd @0 @2 @3))
1901 (cnd @0 @1 @3))
24a179f8
RB
1902 /* A ? B : (!A ? C : X) -> A ? B : C. */
1903 /* ??? This matches embedded conditions open-coded because genmatch
1904 would generate matching code for conditions in separate stmts only.
1905 The following is still important to merge then and else arm cases
1906 from if-conversion. */
1907 (simplify
1908 (cnd @0 @1 (cnd @2 @3 @4))
1909 (if (COMPARISON_CLASS_P (@0)
1910 && COMPARISON_CLASS_P (@2)
1911 && invert_tree_comparison
1912 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
1913 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
1914 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
1915 (cnd @0 @1 @3)))
1916 (simplify
1917 (cnd @0 (cnd @1 @2 @3) @4)
1918 (if (COMPARISON_CLASS_P (@0)
1919 && COMPARISON_CLASS_P (@1)
1920 && invert_tree_comparison
1921 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
1922 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
1923 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
1924 (cnd @0 @3 @4)))
96994de0
RB
1925
1926 /* A ? B : B -> B. */
1927 (simplify
1928 (cnd @0 @1 @1)
09240451 1929 @1)
96994de0 1930
09240451
MG
1931 /* !A ? B : C -> A ? C : B. */
1932 (simplify
1933 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
1934 (cnd @0 @2 @1)))
f84e7fd6 1935
a3ca1bc5
RB
1936/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
1937 return all -1 or all 0 results. */
f43d102e
RS
1938/* ??? We could instead convert all instances of the vec_cond to negate,
1939 but that isn't necessarily a win on its own. */
1940(simplify
a3ca1bc5 1941 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 1942 (if (VECTOR_TYPE_P (type)
4d8989d5 1943 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
f43d102e 1944 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 1945 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 1946 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f43d102e 1947
a3ca1bc5 1948/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
f43d102e 1949(simplify
a3ca1bc5 1950 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
f43d102e 1951 (if (VECTOR_TYPE_P (type)
4d8989d5 1952 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
f43d102e 1953 && (TYPE_MODE (TREE_TYPE (type))
4d8989d5 1954 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
a3ca1bc5 1955 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
f84e7fd6 1956
2ee05f1e 1957
f84e7fd6
RB
1958/* Simplifications of comparisons. */
1959
24f1db9c
RB
1960/* See if we can reduce the magnitude of a constant involved in a
1961 comparison by changing the comparison code. This is a canonicalization
1962 formerly done by maybe_canonicalize_comparison_1. */
1963(for cmp (le gt)
1964 acmp (lt ge)
1965 (simplify
1966 (cmp @0 INTEGER_CST@1)
1967 (if (tree_int_cst_sgn (@1) == -1)
1968 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
1969(for cmp (ge lt)
1970 acmp (gt le)
1971 (simplify
1972 (cmp @0 INTEGER_CST@1)
1973 (if (tree_int_cst_sgn (@1) == 1)
1974 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
1975
1976
f84e7fd6
RB
1977/* We can simplify a logical negation of a comparison to the
1978 inverted comparison. As we cannot compute an expression
1979 operator using invert_tree_comparison we have to simulate
1980 that with expression code iteration. */
1981(for cmp (tcc_comparison)
1982 icmp (inverted_tcc_comparison)
1983 ncmp (inverted_tcc_comparison_with_nans)
1984 /* Ideally we'd like to combine the following two patterns
1985 and handle some more cases by using
1986 (logical_inverted_value (cmp @0 @1))
1987 here but for that genmatch would need to "inline" that.
1988 For now implement what forward_propagate_comparison did. */
1989 (simplify
1990 (bit_not (cmp @0 @1))
1991 (if (VECTOR_TYPE_P (type)
1992 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
1993 /* Comparison inversion may be impossible for trapping math,
1994 invert_tree_comparison will tell us. But we can't use
1995 a computed operator in the replacement tree thus we have
1996 to play the trick below. */
1997 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 1998 (cmp, HONOR_NANS (@0)); }
f84e7fd6 1999 (if (ic == icmp)
8fdc6c67
RB
2000 (icmp @0 @1)
2001 (if (ic == ncmp)
2002 (ncmp @0 @1))))))
f84e7fd6 2003 (simplify
09240451
MG
2004 (bit_xor (cmp @0 @1) integer_truep)
2005 (with { enum tree_code ic = invert_tree_comparison
1b457aa4 2006 (cmp, HONOR_NANS (@0)); }
09240451 2007 (if (ic == icmp)
8fdc6c67
RB
2008 (icmp @0 @1)
2009 (if (ic == ncmp)
2010 (ncmp @0 @1))))))
e18c1d66 2011
2ee05f1e
RB
2012/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
2013 ??? The transformation is valid for the other operators if overflow
2014 is undefined for the type, but performing it here badly interacts
2015 with the transformation in fold_cond_expr_with_comparison which
2016 attempts to synthetize ABS_EXPR. */
2017(for cmp (eq ne)
2018 (simplify
d9ba1961
RB
2019 (cmp (minus@2 @0 @1) integer_zerop)
2020 (if (single_use (@2))
2021 (cmp @0 @1))))
2ee05f1e
RB
2022
2023/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
2024 signed arithmetic case. That form is created by the compiler
2025 often enough for folding it to be of value. One example is in
2026 computing loop trip counts after Operator Strength Reduction. */
07cdc2b8
RB
2027(for cmp (simple_comparison)
2028 scmp (swapped_simple_comparison)
2ee05f1e 2029 (simplify
bc6e9db4 2030 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2ee05f1e
RB
2031 /* Handle unfolded multiplication by zero. */
2032 (if (integer_zerop (@1))
8fdc6c67
RB
2033 (cmp @1 @2)
2034 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
bc6e9db4
RB
2035 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2036 && single_use (@3))
8fdc6c67
RB
2037 /* If @1 is negative we swap the sense of the comparison. */
2038 (if (tree_int_cst_sgn (@1) < 0)
2039 (scmp @0 @2)
2040 (cmp @0 @2))))))
2ee05f1e
RB
2041
2042/* Simplify comparison of something with itself. For IEEE
2043 floating-point, we can only do some of these simplifications. */
287f8f17 2044(for cmp (eq ge le)
2ee05f1e
RB
2045 (simplify
2046 (cmp @0 @0)
287f8f17 2047 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 2048 || ! HONOR_NANS (@0))
287f8f17
RB
2049 { constant_boolean_node (true, type); }
2050 (if (cmp != EQ_EXPR)
2051 (eq @0 @0)))))
2ee05f1e
RB
2052(for cmp (ne gt lt)
2053 (simplify
2054 (cmp @0 @0)
2055 (if (cmp != NE_EXPR
2056 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
b9407883 2057 || ! HONOR_NANS (@0))
2ee05f1e 2058 { constant_boolean_node (false, type); })))
b5d3d787
RB
2059(for cmp (unle unge uneq)
2060 (simplify
2061 (cmp @0 @0)
2062 { constant_boolean_node (true, type); }))
dd53d197
MG
2063(for cmp (unlt ungt)
2064 (simplify
2065 (cmp @0 @0)
2066 (unordered @0 @0)))
b5d3d787
RB
2067(simplify
2068 (ltgt @0 @0)
2069 (if (!flag_trapping_math)
2070 { constant_boolean_node (false, type); }))
2ee05f1e
RB
2071
2072/* Fold ~X op ~Y as Y op X. */
07cdc2b8 2073(for cmp (simple_comparison)
2ee05f1e 2074 (simplify
7fe996ba
RB
2075 (cmp (bit_not@2 @0) (bit_not@3 @1))
2076 (if (single_use (@2) && single_use (@3))
2077 (cmp @1 @0))))
2ee05f1e
RB
2078
2079/* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
07cdc2b8
RB
2080(for cmp (simple_comparison)
2081 scmp (swapped_simple_comparison)
2ee05f1e 2082 (simplify
7fe996ba
RB
2083 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
2084 (if (single_use (@2)
2085 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2ee05f1e
RB
2086 (scmp @0 (bit_not @1)))))
2087
07cdc2b8
RB
2088(for cmp (simple_comparison)
2089 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
2090 (simplify
2091 (cmp (convert@2 @0) (convert? @1))
2092 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2093 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2094 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
2095 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2096 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
2097 (with
2098 {
2099 tree type1 = TREE_TYPE (@1);
2100 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
2101 {
2102 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
2103 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
2104 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
2105 type1 = float_type_node;
2106 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
2107 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
2108 type1 = double_type_node;
2109 }
2110 tree newtype
2111 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
2112 ? TREE_TYPE (@0) : type1);
2113 }
2114 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
2115 (cmp (convert:newtype @0) (convert:newtype @1))))))
2116
2117 (simplify
2118 (cmp @0 REAL_CST@1)
2119 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
64d3a1f0
RB
2120 (switch
2121 /* a CMP (-0) -> a CMP 0 */
2122 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
2123 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
2124 /* x != NaN is always true, other ops are always false. */
2125 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2126 && ! HONOR_SNANS (@1))
2127 { constant_boolean_node (cmp == NE_EXPR, type); })
2128 /* Fold comparisons against infinity. */
2129 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
2130 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
2131 (with
2132 {
2133 REAL_VALUE_TYPE max;
2134 enum tree_code code = cmp;
2135 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
2136 if (neg)
2137 code = swap_tree_comparison (code);
2138 }
2139 (switch
2140 /* x > +Inf is always false, if with ignore sNANs. */
2141 (if (code == GT_EXPR
2142 && ! HONOR_SNANS (@0))
2143 { constant_boolean_node (false, type); })
2144 (if (code == LE_EXPR)
2145 /* x <= +Inf is always true, if we don't case about NaNs. */
2146 (if (! HONOR_NANS (@0))
2147 { constant_boolean_node (true, type); }
b0eb889b 2148 /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
64d3a1f0
RB
2149 (eq @0 @0)))
2150 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
2151 (if (code == EQ_EXPR || code == GE_EXPR)
2152 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2153 (if (neg)
2154 (lt @0 { build_real (TREE_TYPE (@0), max); })
2155 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
2156 /* x < +Inf is always equal to x <= DBL_MAX. */
2157 (if (code == LT_EXPR)
2158 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2159 (if (neg)
2160 (ge @0 { build_real (TREE_TYPE (@0), max); })
2161 (le @0 { build_real (TREE_TYPE (@0), max); }))))
2162 /* x != +Inf is always equal to !(x > DBL_MAX). */
2163 (if (code == NE_EXPR)
2164 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2165 (if (! HONOR_NANS (@0))
2166 (if (neg)
2167 (ge @0 { build_real (TREE_TYPE (@0), max); })
2168 (le @0 { build_real (TREE_TYPE (@0), max); }))
2169 (if (neg)
2170 (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
2171 { build_one_cst (type); })
2172 (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
2173 { build_one_cst (type); }))))))))))
07cdc2b8
RB
2174
2175 /* If this is a comparison of a real constant with a PLUS_EXPR
2176 or a MINUS_EXPR of a real constant, we can convert it into a
2177 comparison with a revised real constant as long as no overflow
2178 occurs when unsafe_math_optimizations are enabled. */
2179 (if (flag_unsafe_math_optimizations)
2180 (for op (plus minus)
2181 (simplify
2182 (cmp (op @0 REAL_CST@1) REAL_CST@2)
2183 (with
2184 {
2185 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
2186 TREE_TYPE (@1), @2, @1);
2187 }
f980c9a2 2188 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
2189 (cmp @0 { tem; }))))))
2190
2191 /* Likewise, we can simplify a comparison of a real constant with
2192 a MINUS_EXPR whose first operand is also a real constant, i.e.
2193 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
2194 floating-point types only if -fassociative-math is set. */
2195 (if (flag_associative_math)
2196 (simplify
0409237b 2197 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
07cdc2b8 2198 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
f980c9a2 2199 (if (tem && !TREE_OVERFLOW (tem))
07cdc2b8
RB
2200 (cmp { tem; } @1)))))
2201
2202 /* Fold comparisons against built-in math functions. */
2203 (if (flag_unsafe_math_optimizations
2204 && ! flag_errno_math)
2205 (for sq (SQRT)
2206 (simplify
2207 (cmp (sq @0) REAL_CST@1)
64d3a1f0
RB
2208 (switch
2209 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2210 (switch
2211 /* sqrt(x) < y is always false, if y is negative. */
2212 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
8fdc6c67 2213 { constant_boolean_node (false, type); })
64d3a1f0
RB
2214 /* sqrt(x) > y is always true, if y is negative and we
2215 don't care about NaNs, i.e. negative values of x. */
2216 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
2217 { constant_boolean_node (true, type); })
2218 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
2219 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
c53233c6
RS
2220 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
2221 (switch
2222 /* sqrt(x) < 0 is always false. */
2223 (if (cmp == LT_EXPR)
2224 { constant_boolean_node (false, type); })
2225 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
2226 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
2227 { constant_boolean_node (true, type); })
2228 /* sqrt(x) <= 0 -> x == 0. */
2229 (if (cmp == LE_EXPR)
2230 (eq @0 @1))
2231 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
2232 == or !=. In the last case:
2233
2234 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
2235
2236 if x is negative or NaN. Due to -funsafe-math-optimizations,
2237 the results for other x follow from natural arithmetic. */
2238 (cmp @0 @1)))
64d3a1f0
RB
2239 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2240 (with
2241 {
2242 REAL_VALUE_TYPE c2;
5c88ea94
RS
2243 real_arithmetic (&c2, MULT_EXPR,
2244 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
2245 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2246 }
2247 (if (REAL_VALUE_ISINF (c2))
2248 /* sqrt(x) > y is x == +Inf, when y is very large. */
2249 (if (HONOR_INFINITIES (@0))
2250 (eq @0 { build_real (TREE_TYPE (@0), c2); })
2251 { constant_boolean_node (false, type); })
2252 /* sqrt(x) > c is the same as x > c*c. */
2253 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
2254 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2255 (with
2256 {
2257 REAL_VALUE_TYPE c2;
5c88ea94
RS
2258 real_arithmetic (&c2, MULT_EXPR,
2259 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
64d3a1f0
RB
2260 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2261 }
2262 (if (REAL_VALUE_ISINF (c2))
2263 (switch
2264 /* sqrt(x) < y is always true, when y is a very large
2265 value and we don't care about NaNs or Infinities. */
2266 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
2267 { constant_boolean_node (true, type); })
2268 /* sqrt(x) < y is x != +Inf when y is very large and we
2269 don't care about NaNs. */
2270 (if (! HONOR_NANS (@0))
2271 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
2272 /* sqrt(x) < y is x >= 0 when y is very large and we
2273 don't care about Infinities. */
2274 (if (! HONOR_INFINITIES (@0))
2275 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
2276 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
2277 (if (GENERIC)
2278 (truth_andif
2279 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2280 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
2281 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
2282 (if (! HONOR_NANS (@0))
2283 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
2284 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
2285 (if (GENERIC)
2286 (truth_andif
2287 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2288 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))))))))))
2ee05f1e 2289
cfdc4f33
MG
2290/* Unordered tests if either argument is a NaN. */
2291(simplify
2292 (bit_ior (unordered @0 @0) (unordered @1 @1))
aea417d7 2293 (if (types_match (@0, @1))
cfdc4f33 2294 (unordered @0 @1)))
257b01ba
MG
2295(simplify
2296 (bit_and (ordered @0 @0) (ordered @1 @1))
2297 (if (types_match (@0, @1))
2298 (ordered @0 @1)))
cfdc4f33
MG
2299(simplify
2300 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
2301 @2)
257b01ba
MG
2302(simplify
2303 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
2304 @2)
e18c1d66 2305
90c6f26c
RB
2306/* Simple range test simplifications. */
2307/* A < B || A >= B -> true. */
5d30c58d
RB
2308(for test1 (lt le le le ne ge)
2309 test2 (ge gt ge ne eq ne)
90c6f26c
RB
2310 (simplify
2311 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
2312 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2313 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
2314 { constant_boolean_node (true, type); })))
2315/* A < B && A >= B -> false. */
2316(for test1 (lt lt lt le ne eq)
2317 test2 (ge gt eq gt eq gt)
2318 (simplify
2319 (bit_and:c (test1 @0 @1) (test2 @0 @1))
2320 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2321 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
2322 { constant_boolean_node (false, type); })))
2323
534bd33b
MG
2324/* -A CMP -B -> B CMP A. */
2325(for cmp (tcc_comparison)
2326 scmp (swapped_tcc_comparison)
2327 (simplify
2328 (cmp (negate @0) (negate @1))
2329 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2330 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2331 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2332 (scmp @0 @1)))
2333 (simplify
2334 (cmp (negate @0) CONSTANT_CLASS_P@1)
2335 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2336 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2337 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
23f27839 2338 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
534bd33b
MG
2339 (if (tem && !TREE_OVERFLOW (tem))
2340 (scmp @0 { tem; }))))))
2341
b0eb889b
MG
2342/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
2343(for op (eq ne)
2344 (simplify
2345 (op (abs @0) zerop@1)
2346 (op @0 @1)))
2347
79d4f7c6
RB
2348/* From fold_sign_changed_comparison and fold_widened_comparison. */
2349(for cmp (simple_comparison)
2350 (simplify
2351 (cmp (convert@0 @00) (convert?@1 @10))
452ec2a5 2352 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
79d4f7c6
RB
2353 /* Disable this optimization if we're casting a function pointer
2354 type on targets that require function pointer canonicalization. */
2355 && !(targetm.have_canonicalize_funcptr_for_compare ()
2356 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
2fde61e3
RB
2357 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
2358 && single_use (@0))
79d4f7c6
RB
2359 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
2360 && (TREE_CODE (@10) == INTEGER_CST
2361 || (@1 != @10 && types_match (TREE_TYPE (@10), TREE_TYPE (@00))))
2362 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
2363 || cmp == NE_EXPR
2364 || cmp == EQ_EXPR)
2365 && (POINTER_TYPE_P (TREE_TYPE (@00)) == POINTER_TYPE_P (TREE_TYPE (@0))))
2366 /* ??? The special-casing of INTEGER_CST conversion was in the original
2367 code and here to avoid a spurious overflow flag on the resulting
2368 constant which fold_convert produces. */
2369 (if (TREE_CODE (@1) == INTEGER_CST)
2370 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
2371 TREE_OVERFLOW (@1)); })
2372 (cmp @00 (convert @1)))
2373
2374 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
2375 /* If possible, express the comparison in the shorter mode. */
2376 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
7fd82d52
PP
2377 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
2378 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
2379 && TYPE_UNSIGNED (TREE_TYPE (@00))))
79d4f7c6
RB
2380 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
2381 || ((TYPE_PRECISION (TREE_TYPE (@00))
2382 >= TYPE_PRECISION (TREE_TYPE (@10)))
2383 && (TYPE_UNSIGNED (TREE_TYPE (@00))
2384 == TYPE_UNSIGNED (TREE_TYPE (@10))))
2385 || (TREE_CODE (@10) == INTEGER_CST
f6c15759 2386 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
2387 && int_fits_type_p (@10, TREE_TYPE (@00)))))
2388 (cmp @00 (convert @10))
2389 (if (TREE_CODE (@10) == INTEGER_CST
f6c15759 2390 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
79d4f7c6
RB
2391 && !int_fits_type_p (@10, TREE_TYPE (@00)))
2392 (with
2393 {
2394 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2395 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2396 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
2397 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
2398 }
2399 (if (above || below)
2400 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
2401 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
2402 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2403 { constant_boolean_node (above ? true : false, type); }
2404 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2405 { constant_boolean_node (above ? false : true, type); }))))))))))))
66e1cacf 2406
96a111a3
RB
2407(for cmp (eq ne)
2408 /* A local variable can never be pointed to by
2409 the default SSA name of an incoming parameter.
2410 SSA names are canonicalized to 2nd place. */
2411 (simplify
2412 (cmp addr@0 SSA_NAME@1)
2413 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
2414 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
2415 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
2416 (if (TREE_CODE (base) == VAR_DECL
2417 && auto_var_in_fn_p (base, current_function_decl))
2418 (if (cmp == NE_EXPR)
2419 { constant_boolean_node (true, type); }
2420 { constant_boolean_node (false, type); }))))))
2421
66e1cacf
RB
2422/* Equality compare simplifications from fold_binary */
2423(for cmp (eq ne)
2424
2425 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
2426 Similarly for NE_EXPR. */
2427 (simplify
2428 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
2429 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
2430 && wi::bit_and_not (@1, @2) != 0)
2431 { constant_boolean_node (cmp == NE_EXPR, type); }))
2432
2433 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
2434 (simplify
2435 (cmp (bit_xor @0 @1) integer_zerop)
2436 (cmp @0 @1))
2437
2438 /* (X ^ Y) == Y becomes X == 0.
2439 Likewise (X ^ Y) == X becomes Y == 0. */
2440 (simplify
99e943a2 2441 (cmp:c (bit_xor:c @0 @1) @0)
66e1cacf
RB
2442 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
2443
2444 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
2445 (simplify
2446 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
2447 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
d057c866 2448 (cmp @0 (bit_xor @1 (convert @2)))))
d057c866
RB
2449
2450 (simplify
2451 (cmp (convert? addr@0) integer_zerop)
2452 (if (tree_single_nonzero_warnv_p (@0, NULL))
2453 { constant_boolean_node (cmp == NE_EXPR, type); })))
2454
b0eb889b
MG
2455/* If we have (A & C) == C where C is a power of 2, convert this into
2456 (A & C) != 0. Similarly for NE_EXPR. */
2457(for cmp (eq ne)
2458 icmp (ne eq)
2459 (simplify
2460 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
2461 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
2462
2463/* If we have (A & C) != 0 where C is the sign bit of A, convert
2464 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
2465(for cmp (eq ne)
2466 ncmp (ge lt)
2467 (simplify
2468 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
2469 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2470 && (TYPE_PRECISION (TREE_TYPE (@0))
2471 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
2472 && element_precision (@2) >= element_precision (@0)
2473 && wi::only_sign_bit_p (@1, element_precision (@0)))
2474 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2475 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
2476
68aba1f6
RB
2477/* When the addresses are not directly of decls compare base and offset.
2478 This implements some remaining parts of fold_comparison address
2479 comparisons but still no complete part of it. Still it is good
2480 enough to make fold_stmt not regress when not dispatching to fold_binary. */
2481(for cmp (simple_comparison)
2482 (simplify
f501d5cd 2483 (cmp (convert1?@2 addr@0) (convert2? addr@1))
68aba1f6
RB
2484 (with
2485 {
2486 HOST_WIDE_INT off0, off1;
2487 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
2488 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
2489 if (base0 && TREE_CODE (base0) == MEM_REF)
2490 {
2491 off0 += mem_ref_offset (base0).to_short_addr ();
2492 base0 = TREE_OPERAND (base0, 0);
2493 }
2494 if (base1 && TREE_CODE (base1) == MEM_REF)
2495 {
2496 off1 += mem_ref_offset (base1).to_short_addr ();
2497 base1 = TREE_OPERAND (base1, 0);
2498 }
2499 }
da571fda
RB
2500 (if (base0 && base1)
2501 (with
2502 {
aad88aed 2503 int equal = 2;
da571fda
RB
2504 if (decl_in_symtab_p (base0)
2505 && decl_in_symtab_p (base1))
2506 equal = symtab_node::get_create (base0)
2507 ->equal_address_to (symtab_node::get_create (base1));
c3bea076
RB
2508 else if ((DECL_P (base0)
2509 || TREE_CODE (base0) == SSA_NAME
2510 || TREE_CODE (base0) == STRING_CST)
2511 && (DECL_P (base1)
2512 || TREE_CODE (base1) == SSA_NAME
2513 || TREE_CODE (base1) == STRING_CST))
aad88aed 2514 equal = (base0 == base1);
da571fda
RB
2515 }
2516 (if (equal == 1
2517 && (cmp == EQ_EXPR || cmp == NE_EXPR
2518 /* If the offsets are equal we can ignore overflow. */
2519 || off0 == off1
2520 || POINTER_TYPE_OVERFLOW_UNDEFINED
c3bea076 2521 /* Or if we compare using pointers to decls or strings. */
da571fda 2522 || (POINTER_TYPE_P (TREE_TYPE (@2))
c3bea076 2523 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
da571fda
RB
2524 (switch
2525 (if (cmp == EQ_EXPR)
2526 { constant_boolean_node (off0 == off1, type); })
2527 (if (cmp == NE_EXPR)
2528 { constant_boolean_node (off0 != off1, type); })
2529 (if (cmp == LT_EXPR)
2530 { constant_boolean_node (off0 < off1, type); })
2531 (if (cmp == LE_EXPR)
2532 { constant_boolean_node (off0 <= off1, type); })
2533 (if (cmp == GE_EXPR)
2534 { constant_boolean_node (off0 >= off1, type); })
2535 (if (cmp == GT_EXPR)
2536 { constant_boolean_node (off0 > off1, type); }))
2537 (if (equal == 0
2538 && DECL_P (base0) && DECL_P (base1)
2539 /* If we compare this as integers require equal offset. */
2540 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
2541 || off0 == off1))
2542 (switch
2543 (if (cmp == EQ_EXPR)
2544 { constant_boolean_node (false, type); })
2545 (if (cmp == NE_EXPR)
2546 { constant_boolean_node (true, type); })))))))))
66e1cacf 2547
98998245
RB
2548/* Simplify pointer equality compares using PTA. */
2549(for neeq (ne eq)
2550 (simplify
2551 (neeq @0 @1)
2552 (if (POINTER_TYPE_P (TREE_TYPE (@0))
2553 && ptrs_compare_unequal (@0, @1))
2554 { neeq == EQ_EXPR ? boolean_false_node : boolean_true_node; })))
2555
/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
   and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
   Disable the transform if either operand is a pointer to function:
   it broke pr22051-2.c for arm, where function pointer
   canonicalization is not wanted.  */
(for cmp (ne eq)
 (simplify
  (cmp (convert @0) INTEGER_CST@1)
  /* Either: pointer compared against an integer constant, or an
     integer compared against a pointer constant — in both cases move
     the conversion onto the constant, which folds away.  */
  (if ((POINTER_TYPE_P (TREE_TYPE (@0))
	&& !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
	&& INTEGRAL_TYPE_P (TREE_TYPE (@1)))
       || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   && POINTER_TYPE_P (TREE_TYPE (@1))
	   && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
   (cmp @0 (convert @1)))))
/* Non-equality compare simplifications from fold_binary.  */
(for cmp (lt gt le ge)
 /* Comparisons with the highest or lowest possible integer of
    the specified precision will have known values.  */
 (simplify
  (cmp (convert?@2 @0) INTEGER_CST@1)
  (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
       && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
   (with
    {
      tree arg1_type = TREE_TYPE (@1);
      unsigned int prec = TYPE_PRECISION (arg1_type);
      wide_int max = wi::max_value (arg1_type);
      wide_int signed_max = wi::max_value (prec, SIGNED);
      wide_int min = wi::min_value (arg1_type);
    }
    (switch
     /* X > MAX is always false; X >= MAX iff X == MAX; etc.  */
     (if (wi::eq_p (@1, max))
      (switch
       (if (cmp == GT_EXPR)
	{ constant_boolean_node (false, type); })
       (if (cmp == GE_EXPR)
	(eq @2 @1))
       (if (cmp == LE_EXPR)
	{ constant_boolean_node (true, type); })
       (if (cmp == LT_EXPR)
	(ne @2 @1))))
     /* X < MIN is always false; X <= MIN iff X == MIN; etc.  */
     (if (wi::eq_p (@1, min))
      (switch
       (if (cmp == LT_EXPR)
	{ constant_boolean_node (false, type); })
       (if (cmp == LE_EXPR)
	(eq @2 @1))
       (if (cmp == GE_EXPR)
	{ constant_boolean_node (true, type); })
       (if (cmp == GT_EXPR)
	(ne @2 @1))))
     /* X > MAX-1 iff X == MAX; X <= MAX-1 iff X != MAX.  */
     (if (wi::eq_p (@1, max - 1))
      (switch
       (if (cmp == GT_EXPR)
	(eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
       (if (cmp == LE_EXPR)
	(ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
     /* X >= MIN+1 iff X != MIN; X < MIN+1 iff X == MIN.  */
     (if (wi::eq_p (@1, min + 1))
      (switch
       (if (cmp == GE_EXPR)
	(ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
       (if (cmp == LT_EXPR)
	(eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
     /* Unsigned X <= signed-max becomes a sign test in the signed type.  */
     (if (wi::eq_p (@1, signed_max)
	  && TYPE_UNSIGNED (arg1_type)
	  /* We will flip the signedness of the comparison operator
	     associated with the mode of @1, so the sign bit is
	     specified by this mode.  Check that @1 is the signed
	     max associated with this sign bit.  */
	  && prec == GET_MODE_PRECISION (TYPE_MODE (arg1_type))
	  /* signed_type does not work on pointer types.  */
	  && INTEGRAL_TYPE_P (arg1_type))
      /* The following case also applies to X < signed_max+1
	 and X >= signed_max+1 because previous transformations.  */
      (if (cmp == LE_EXPR || cmp == GT_EXPR)
       (with { tree st = signed_type_for (arg1_type); }
	(if (cmp == LE_EXPR)
	 (ge (convert:st @0) { build_zero_cst (st); })
	 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
 /* If the second operand is NaN, the result is constant: every
    unordered comparison with NaN is true, ORDERED and LTGT are false.
    LTGT may trap, so only fold it when trapping math is off.  */
 (simplify
  (cmp @0 REAL_CST@1)
  (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
       && (cmp != LTGT_EXPR || ! flag_trapping_math))
   { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
			    ? false : true, type); })))
/* bool_var != 0 becomes bool_var.  */
(simplify
 (ne @0 integer_zerop)
 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
      && types_match (type, TREE_TYPE (@0)))
  (non_lvalue @0)))
/* bool_var == 1 becomes bool_var.  */
(simplify
 (eq @0 integer_onep)
 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
      && types_match (type, TREE_TYPE (@0)))
  (non_lvalue @0)))
/* Do not handle
   bool_var == 0 becomes !bool_var or
   bool_var != 1 becomes !bool_var
   here because that only is good in assignment context as long
   as we require a tcc_comparison in GIMPLE_CONDs where we'd
   replace if (x == 0) with tem = ~x; if (tem != 0) which is
   clearly less optimal and which we'll transform again in forwprop.  */
/* When one argument is a constant, overflow detection can be simplified.
   Currently restricted to single use so as not to interfere too much with
   ADD_OVERFLOW detection in tree-ssa-math-opts.c.
   A + CST CMP A  ->  A CMP' CST'
   (valid only for unsigned wrapping types; the new bound is
   UINT_MAX - CST).  */
(for cmp (lt le ge gt)
     out (gt gt le le)
 (simplify
  (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && wi::ne_p (@1, 0)
       && single_use (@2))
   (out @0 { wide_int_to_tree (TREE_TYPE (@0), wi::max_value
	       (TYPE_PRECISION (TREE_TYPE (@0)), UNSIGNED) - @1); }))))
/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
   However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
   expects the long form, so we restrict the transformation for now.  */
(for cmp (gt le)
 (simplify
  (cmp:c (minus@2 @0 @1) @0)
  (if (single_use (@2)
       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
   (cmp @1 @0))))
/* Testing for overflow is unnecessary if we already know the result.  */
/* A - B > A: for unsigned types the comparison of the real part of
   SUB_OVERFLOW against A is equivalent to testing the overflow flag
   (imaginary part) itself.  */
(for cmp (gt le)
     out (ne eq)
 (simplify
  (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
/* A + B < A: same reasoning for ADD_OVERFLOW.  */
(for cmp (lt ge)
     out (ne eq)
 (simplify
  (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))

/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
   Simplify it to __builtin_mul_overflow (A, B, <unused>).  */
(for cmp (lt ge)
     out (ne eq)
 (simplify
  (cmp:c (trunc_div:s integer_all_onesp @1) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
   (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
    (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
/* Simplification of math builtins.  These rules must all be optimizations
   as well as IL simplifications.  If there is a possibility that the new
   form could be a pessimization, the rule should go in the canonicalization
   section that follows this one.

   Rules can generally go in this section if they satisfy one of
   the following:

   - the rule describes an identity

   - the rule replaces calls with something as simple as addition or
     multiplication

   - the rule contains unary calls only and simplifies the surrounding
     arithmetic.  (The idea here is to exclude non-unary calls in which
     one operand is constant and in which the call is known to be cheap
     when the operand has that value.)  */

(if (flag_unsafe_math_optimizations)
 /* Simplify sqrt(x) * sqrt(x) -> x.  */
 (simplify
  (mult (SQRT@1 @0) @1)
  (if (!HONOR_SNANS (type))
   @0))

 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y).  */
 (for root (SQRT CBRT)
  (simplify
   (mult (root:s @0) (root:s @1))
   (root (mult @0 @1))))

 /* Simplify expN(x) * expN(y) -> expN(x+y).  */
 (for exps (EXP EXP2 EXP10 POW10)
  (simplify
   (mult (exps:s @0) (exps:s @1))
   (exps (plus @0 @1))))

 /* Simplify a/root(b/c) into a*root(c/b).  */
 (for root (SQRT CBRT)
  (simplify
   (rdiv @0 (root:s (rdiv:s @1 @2)))
   (mult @0 (root (rdiv @2 @1)))))

 /* Simplify x/expN(y) into x*expN(-y).  */
 (for exps (EXP EXP2 EXP10 POW10)
  (simplify
   (rdiv @0 (exps:s @1))
   (mult @0 (exps (negate @1)))))

 /* LOG10 is listed twice so it pairs with both EXP10 and POW10.  */
 (for logs (LOG LOG2 LOG10 LOG10)
      exps (EXP EXP2 EXP10 POW10)
  /* logN(expN(x)) -> x.  */
  (simplify
   (logs (exps @0))
   @0)
  /* expN(logN(x)) -> x.  */
  (simplify
   (exps (logs @0))
   @0))

 /* Optimize logN(func()) for various exponential functions.  We
    want to determine the value "x" and the power "exponent" in
    order to transform logN(x**exponent) into exponent*logN(x).  */
 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
      exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
  (simplify
   (logs (exps @0))
   (if (SCALAR_FLOAT_TYPE_P (type))
    (with {
      tree x;
      switch (exps)
	{
	CASE_CFN_EXP:
	  /* Prepare to do logN(exp(exponent)) -> exponent*logN(e).  */
	  x = build_real_truncate (type, dconst_e ());
	  break;
	CASE_CFN_EXP2:
	  /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2).  */
	  x = build_real (type, dconst2);
	  break;
	CASE_CFN_EXP10:
	CASE_CFN_POW10:
	  /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10).  */
	  {
	    REAL_VALUE_TYPE dconst10;
	    real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
	    x = build_real (type, dconst10);
	  }
	  break;
	default:
	  gcc_unreachable ();
	}
      }
     (mult (logs { x; }) @0)))))

 (for logs (LOG LOG
	    LOG2 LOG2
	    LOG10 LOG10)
      exps (SQRT CBRT)
  (simplify
   (logs (exps @0))
   (if (SCALAR_FLOAT_TYPE_P (type))
    (with {
      tree x;
      switch (exps)
	{
	CASE_CFN_SQRT:
	  /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x).  */
	  x = build_real (type, dconsthalf);
	  break;
	CASE_CFN_CBRT:
	  /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x).  */
	  x = build_real_truncate (type, dconst_third ());
	  break;
	default:
	  gcc_unreachable ();
	}
      }
     (mult { x; } (logs @0))))))

 /* logN(pow(x,exponent)) -> exponent*logN(x).  */
 (for logs (LOG LOG2 LOG10)
      pows (POW)
  (simplify
   (logs (pows @0 @1))
   (mult @1 (logs @0))))

 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
      exps (EXP EXP2 EXP10 POW10)
  /* sqrt(expN(x)) -> expN(x*0.5).  */
  (simplify
   (sqrts (exps @0))
   (exps (mult @0 { build_real (type, dconsthalf); })))
  /* cbrt(expN(x)) -> expN(x/3).  */
  (simplify
   (cbrts (exps @0))
   (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
  /* pow(expN(x), y) -> expN(x*y).  */
  (simplify
   (pows (exps @0) @1)
   (exps (mult @0 @1))))

 /* tan(atan(x)) -> x.  */
 (for tans (TAN)
      atans (ATAN)
  (simplify
   (tans (atans @0))
   @0)))
/* cabs(x+0i) or cabs(0+xi) -> abs(x).
   The :C flag also matches the commutated (0+xi) form.  */
(simplify
 (CABS (complex:C @0 real_zerop@1))
 (abs @0))

/* trunc(trunc(x)) -> trunc(x), etc.  */
(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
 (simplify
  (fns (fns @0))
  (fns @0)))
/* f(x) -> x if x is integer valued and f does nothing for such values.  */
(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
 (simplify
  (fns integer_valued_real_p@0)
  @0))

/* hypot(x,0) and hypot(0,x) -> abs(x).  */
(simplify
 (HYPOT:c @0 real_zerop@1)
 (abs @0))

/* pow(1,x) -> 1.  */
(simplify
 (POW real_onep@0 @1)
 @0)

(simplify
 /* copysign(x,x) -> x.  */
 (COPYSIGN @0 @0)
 @0)

(simplify
 /* copysign(x,y) -> fabs(x) if y is nonnegative.  */
 (COPYSIGN @0 tree_expr_nonnegative_p@1)
 (abs @0))

(for scale (LDEXP SCALBN SCALBLN)
 /* ldexp(0, x) -> 0.  */
 (simplify
  (scale real_zerop@0 @1)
  @0)
 /* ldexp(x, 0) -> x.  */
 (simplify
  (scale @0 integer_zerop@1)
  @0)
 /* ldexp(x, y) -> x if x is +-Inf or NaN.  */
 (simplify
  (scale REAL_CST@0 @1)
  (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
   @0)))
/* Canonicalization of sequences of math builtins.  These rules represent
   IL simplifications but are not necessarily optimizations.

   The sincos pass is responsible for picking "optimal" implementations
   of math builtins, which may be more complicated and can sometimes go
   the other way, e.g. converting pow into a sequence of sqrts.
   We only want to do these canonicalizations before the pass has run.  */

(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
 /* Simplify tan(x) * cos(x) -> sin(x).  */
 (simplify
  (mult:c (TAN:s @0) (COS:s @0))
  (SIN @0))

 /* Simplify x * pow(x,c) -> pow(x,c+1).  */
 (simplify
  (mult:c @0 (POW:s @0 REAL_CST@1))
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (plus @1 { build_one_cst (type); }))))

 /* Simplify sin(x) / cos(x) -> tan(x).  */
 (simplify
  (rdiv (SIN:s @0) (COS:s @0))
  (TAN @0))

 /* Simplify cos(x) / sin(x) -> 1 / tan(x).  */
 (simplify
  (rdiv (COS:s @0) (SIN:s @0))
  (rdiv { build_one_cst (type); } (TAN @0)))

 /* Simplify sin(x) / tan(x) -> cos(x).  */
 (simplify
  (rdiv (SIN:s @0) (TAN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (COS @0)))

 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x).  */
 (simplify
  (rdiv (TAN:s @0) (SIN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (rdiv { build_one_cst (type); } (COS @0))))

 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z).  */
 (simplify
  (mult (POW:s @0 @1) (POW:s @0 @2))
  (POW @0 (plus @1 @2)))

 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y).  */
 (simplify
  (mult (POW:s @0 @1) (POW:s @2 @1))
  (POW (mult @0 @2) @1))

 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y).  */
 (simplify
  (mult (POWI:s @0 @1) (POWI:s @2 @1))
  (POWI (mult @0 @2) @1))

 /* Simplify pow(x,c) / x -> pow(x,c-1).  */
 (simplify
  (rdiv (POW:s @0 REAL_CST@1) @0)
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (minus @1 { build_one_cst (type); }))))

 /* Simplify x / pow (y,z) -> x * pow(y,-z).  */
 (simplify
  (rdiv @0 (POW:s @1 @2))
  (mult @0 (POW @1 (negate @2))))

 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
  /* sqrt(sqrt(x)) -> pow(x,1/4).  */
  (simplify
   (sqrts (sqrts @0))
   (pows @0 { build_real (type, dconst_quarter ()); }))
  /* sqrt(cbrt(x)) -> pow(x,1/6).  */
  (simplify
   (sqrts (cbrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(sqrt(x)) -> pow(x,1/6).  */
  (simplify
   (cbrts (sqrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative.  */
  (simplify
   (cbrts (cbrts tree_expr_nonnegative_p@0))
   (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
  /* sqrt(pow(x,y)) -> pow(|x|,y*0.5).  */
  (simplify
   (sqrts (pows @0 @1))
   (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
  /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative.  */
  (simplify
   (cbrts (pows tree_expr_nonnegative_p@0 @1))
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(sqrt(x),y) -> pow(x,y*0.5).  */
  (simplify
   (pows (sqrts @0) @1)
   (pows @0 (mult @1 { build_real (type, dconsthalf); })))
  /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative.  */
  (simplify
   (pows (cbrts tree_expr_nonnegative_p@0) @1)
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative.  */
  (simplify
   (pows (pows tree_expr_nonnegative_p@0 @1) @2)
   (pows @0 (mult @1 @2))))

 /* cabs(x+xi) -> fabs(x)*sqrt(2).  */
 (simplify
  (CABS (complex @0 @0))
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* hypot(x,x) -> fabs(x)*sqrt(2).  */
 (simplify
  (HYPOT @0 @0)
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* cexp(x+yi) -> exp(x)*cexpi(y).  */
 (for cexps (CEXP)
      exps (EXP)
      cexpis (CEXPI)
  (simplify
   (cexps compositional_complex@0)
   (if (targetm.libc_has_function (function_c99_math_complex))
    (complex
     (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
     (mult @1 (imagpart @2)))))))
(if (canonicalize_math_p ())
 /* floor(x) -> trunc(x) if x is nonnegative.  */
 (for floors (FLOOR)
      truncs (TRUNC)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (truncs @0))))

(match double_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
(for froms (BUILT_IN_TRUNCL
	    BUILT_IN_FLOORL
	    BUILT_IN_CEILL
	    BUILT_IN_ROUNDL
	    BUILT_IN_NEARBYINTL
	    BUILT_IN_RINTL)
     tos (BUILT_IN_TRUNC
	  BUILT_IN_FLOOR
	  BUILT_IN_CEIL
	  BUILT_IN_ROUND
	  BUILT_IN_NEARBYINT
	  BUILT_IN_RINT)
 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (convert (tos @0)))))

(match float_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
	    BUILT_IN_FLOORL BUILT_IN_FLOOR
	    BUILT_IN_CEILL BUILT_IN_CEIL
	    BUILT_IN_ROUNDL BUILT_IN_ROUND
	    BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
	    BUILT_IN_RINTL BUILT_IN_RINT)
     tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
	  BUILT_IN_FLOORF BUILT_IN_FLOORF
	  BUILT_IN_CEILF BUILT_IN_CEILF
	  BUILT_IN_ROUNDF BUILT_IN_ROUNDF
	  BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
	  BUILT_IN_RINTF BUILT_IN_RINTF)
 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
    if x is a float.  Requires the C99 float variants in libc.  */
 (if (optimize && canonicalize_math_p ()
      && targetm.libc_has_function (function_c99_misc))
  (simplify
   (froms (convert float_value_p@0))
   (convert (tos @0)))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL)
     tos (XFLOOR XCEIL XROUND XRINT)
 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (tos @0))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL
	    XFLOOR XCEIL XROUND XRINT)
     tos (XFLOORF XCEILF XROUNDF XRINTF)
 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
    if x is a float.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert float_value_p@0))
   (tos @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x) if x is nonnegative.  */
 (for floors (IFLOOR LFLOOR LLFLOOR)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (fix_trunc @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued.  */
 (for fns (IFLOOR LFLOOR LLFLOOR
	   ICEIL LCEIL LLCEIL
	   IROUND LROUND LLROUND)
  (simplify
   (fns integer_valued_real_p@0)
   (fix_trunc @0)))
 (if (!flag_errno_math)
  /* xrint(x) -> fix_trunc(x), etc., if x is integer valued.  */
  (for rints (IRINT LRINT LLRINT)
   (simplify
    (rints integer_valued_real_p@0)
    (fix_trunc @0)))))

(if (canonicalize_math_p ())
 (for ifn (IFLOOR ICEIL IROUND IRINT)
      lfn (LFLOOR LCEIL LROUND LRINT)
      llfn (LLFLOOR LLCEIL LLROUND LLRINT)
  /* Canonicalize iround (x) to lround (x) on ILP32 targets where
     sizeof (int) == sizeof (long).  */
  (if (TYPE_PRECISION (integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (ifn @0)
    (lfn:long_integer_type_node @0)))
  /* Canonicalize llround (x) to lround (x) on LP64 targets where
     sizeof (long long) == sizeof (long).  */
  (if (TYPE_PRECISION (long_long_integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (llfn @0)
    (lfn:long_integer_type_node @0)))))
/* cproj(x) -> x if we're ignoring infinities.  */
(simplify
 (CPROJ @0)
 (if (!HONOR_INFINITIES (type))
  @0))

/* If the real part is inf and the imag part is known to be
   nonnegative, return (inf + 0i).  */
(simplify
 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
  { build_complex_inf (type, false); }))

/* If the imag part is inf, return (inf+I*copysign(0,imag)).  */
(simplify
 (CPROJ (complex @0 REAL_CST@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
  { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))

/* Fold pow with a constant exponent into cheaper forms.  */
(for pows (POW)
     sqrts (SQRT)
     cbrts (CBRT)
 (simplify
  (pows @0 REAL_CST@1)
  (with {
    const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
    REAL_VALUE_TYPE tmp;
   }
   (switch
    /* pow(x,0) -> 1.  */
    (if (real_equal (value, &dconst0))
     { build_real (type, dconst1); })
    /* pow(x,1) -> x.  */
    (if (real_equal (value, &dconst1))
     @0)
    /* pow(x,-1) -> 1/x.  */
    (if (real_equal (value, &dconstm1))
     (rdiv { build_real (type, dconst1); } @0))
    /* pow(x,0.5) -> sqrt(x).  */
    (if (flag_unsafe_math_optimizations
	 && canonicalize_math_p ()
	 && real_equal (value, &dconsthalf))
     (sqrts @0))
    /* pow(x,1/3) -> cbrt(x).  */
    (if (flag_unsafe_math_optimizations
	 && canonicalize_math_p ()
	 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
	     real_equal (value, &tmp)))
     (cbrts @0))))))

/* powi(1,x) -> 1.  */
(simplify
 (POWI real_onep@0 @1)
 @0)

(simplify
 (POWI @0 INTEGER_CST@1)
 (switch
  /* powi(x,0) -> 1.  */
  (if (wi::eq_p (@1, 0))
   { build_real (type, dconst1); })
  /* powi(x,1) -> x.  */
  (if (wi::eq_p (@1, 1))
   @0)
  /* powi(x,-1) -> 1/x.  */
  (if (wi::eq_p (@1, -1))
   (rdiv { build_real (type, dconst1); } @0))))
/* Narrowing of arithmetic and logical operations.

   These are conceptually similar to the transformations performed for
   the C/C++ front-ends by shorten_binary_op and shorten_compare.  Long
   term we want to move all that code out of the front-ends into here.  */

/* If we have a narrowing conversion of an arithmetic operation where
   both operands are widening conversions from the same type as the outer
   narrowing conversion.  Then convert the innermost operands to a suitable
   unsigned type (to avoid introducing undefined behavior), perform the
   operation and convert the result to the desired type.  */
(for op (plus minus)
  (simplify
    (convert (op:s (convert@2 @0) (convert@3 @1)))
    (if (INTEGRAL_TYPE_P (type)
	 /* We check for type compatibility between @0 and @1 below,
	    so there's no need to check that @1/@3 are integral types.  */
	 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
	 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
	 /* The precision of the type of each operand must match the
	    precision of the mode of each operand, similarly for the
	    result.  */
	 && (TYPE_PRECISION (TREE_TYPE (@0))
	     == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
	 && (TYPE_PRECISION (TREE_TYPE (@1))
	     == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
	 && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
	 /* The inner conversion must be a widening conversion.  */
	 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
	 && types_match (@0, @1)
	 && types_match (@0, type))
      (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
	(convert (op @0 @1))
	(with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
	 (convert (op (convert:utype @0) (convert:utype @1))))))))
/* This is another case of narrowing, specifically when there's an outer
   BIT_AND_EXPR which masks off bits outside the type of the innermost
   operands.  Like the previous case we have to convert the operands
   to unsigned types to avoid introducing undefined behavior for the
   arithmetic operation.  */
(for op (minus plus)
 (simplify
  (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
  (if (INTEGRAL_TYPE_P (type)
       /* We check for type compatibility between @0 and @1 below,
	  so there's no need to check that @1/@3 are integral types.  */
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@2))
       /* The precision of the type of each operand must match the
	  precision of the mode of each operand, similarly for the
	  result.  */
       && (TYPE_PRECISION (TREE_TYPE (@0))
	   == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
       && (TYPE_PRECISION (TREE_TYPE (@1))
	   == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
       && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
       /* The inner conversion must be a widening conversion.  */
       && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
       && types_match (@0, @1)
       /* The mask must fit in the narrower type ...  */
       && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
	   <= TYPE_PRECISION (TREE_TYPE (@0)))
       /* ... and must not set any bits above it.  */
       && (wi::bit_and (@4, wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
				      true, TYPE_PRECISION (type))) == 0))
   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
    (with { tree ntype = TREE_TYPE (@0); }
     (convert (bit_and (op @0 @1) (convert:ntype @4))))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (convert (bit_and (op (convert:utype @0) (convert:utype @1))
		       (convert:utype @4))))))))
/* Transform (@0 < @1 and @0 < @2) to use min,
   (@0 > @1 and @0 > @2) to use max.
   Exclude INTEGER_CST @0 so we don't fight canonicalization of
   constant-vs-var comparisons.  */
(for op (lt le gt ge)
     ext (min min max max)
 (simplify
  (bit_and (op:cs @0 @1) (op:cs @0 @2))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TREE_CODE (@0) != INTEGER_CST)
   (op @0 (ext @1 @2)))))
(simplify
 /* signbit(x) -> 0 if x is nonnegative.  */
 (SIGNBIT tree_expr_nonnegative_p@0)
 { integer_zero_node; })

(simplify
 /* signbit(x) -> x<0 if x doesn't have signed zeros.  */
 (SIGNBIT @0)
 (if (!HONOR_SIGNED_ZEROS (@0))
  (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1.  */
(for cmp (eq ne)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
	&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
	&& !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
	&& !TYPE_SATURATING (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      /* C2 -+ C1 overflowed, so X +- C1 can never equal C2.  */
      { constant_boolean_node (cmp == NE_EXPR, type); }
      (if (single_use (@3))
       (cmp @0 { res; }))))))))
(for cmp (lt le gt ge)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
	&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      {
	fold_overflow_warning (("assuming signed overflow does not occur "
				"when simplifying conditional to constant"),
			       WARN_STRICT_OVERFLOW_CONDITIONAL);
	bool less = cmp == LE_EXPR || cmp == LT_EXPR;
	/* wi::ges_p (@2, 0) should be sufficient for a signed type.  */
	bool ovf_high = wi::lt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
			!= (op == MINUS_EXPR);
	constant_boolean_node (less == ovf_high, type);
      }
      (if (single_use (@3))
       (with
	{
	  fold_overflow_warning (("assuming signed overflow does not occur "
				  "when changing X +- C1 cmp C2 to "
				  "X cmp C2 -+ C1"),
				 WARN_STRICT_OVERFLOW_COMPARISON);
	}
	(cmp @0 { res; })))))))))
/* Canonicalizations of BIT_FIELD_REFs.  */

(simplify
 (BIT_FIELD_REF @0 @1 @2)
 (switch
  /* A BIT_FIELD_REF of one half of a complex value is the real part
     (offset 0) or the imaginary part (offset == component size).  */
  (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
       && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
   (switch
    (if (integer_zerop (@2))
     (view_convert (realpart @0)))
    (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
     (view_convert (imagpart @0)))))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (type)
       /* On GIMPLE this should only apply to register arguments.  */
       && (! GIMPLE || is_gimple_reg (@0))
       /* A bit-field-ref that referenced the full argument can be stripped.  */
       && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
	    && integer_zerop (@2))
	   /* Low-parts can be reduced to integral conversions.
	      ??? The following doesn't work for PDP endian.  */
	   || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
	       /* Don't even think about BITS_BIG_ENDIAN.  */
	       && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
	       && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
	       && compare_tree_int (@2, (BYTES_BIG_ENDIAN
					 ? (TYPE_PRECISION (TREE_TYPE (@0))
					    - TYPE_PRECISION (type))
					 : 0)) == 0)))
   (convert @0))))
/* Simplify vector extracts.  */

(simplify
 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
      && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
	  || (VECTOR_TYPE_P (type)
	      && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
  (with
   {
     tree ctor = (TREE_CODE (@0) == SSA_NAME
		  ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
     tree eltype = TREE_TYPE (TREE_TYPE (ctor));
     unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
     unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
     unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
   }
   /* Only handle element-aligned extracts that stay in bounds.  */
   (if (n != 0
	&& (idx % width) == 0
	&& (n % width) == 0
	&& ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
    (with
     {
       idx = idx / width;
       n = n / width;
       /* Constructor elements can be subvectors.  */
       unsigned HOST_WIDE_INT k = 1;
       if (CONSTRUCTOR_NELTS (ctor) != 0)
	 {
	   tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
	   if (TREE_CODE (cons_elem) == VECTOR_TYPE)
	     k = TYPE_VECTOR_SUBPARTS (cons_elem);
	 }
     }
     (switch
      /* We keep an exact subset of the constructor elements.  */
      (if ((idx % k) == 0 && (n % k) == 0)
       (if (CONSTRUCTOR_NELTS (ctor) == 0)
	{ build_constructor (type, NULL); }
	(with
	 {
	   idx /= k;
	   n /= k;
	 }
	 (if (n == 1)
	  (if (idx < CONSTRUCTOR_NELTS (ctor))
	   { CONSTRUCTOR_ELT (ctor, idx)->value; }
	   /* Elements past the last initializer are implicitly zero.  */
	   { build_zero_cst (type); })
	  {
	    vec<constructor_elt, va_gc> *vals;
	    vec_alloc (vals, n);
	    for (unsigned i = 0;
		 i < n && idx + i < CONSTRUCTOR_NELTS (ctor); ++i)
	      CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
				      CONSTRUCTOR_ELT (ctor, idx + i)->value);
	    build_constructor (type, vals);
	  }))))
      /* The bitfield references a single constructor element.  */
      (if (idx + n <= (idx / k + 1) * k)
       (switch
	(if (CONSTRUCTOR_NELTS (ctor) <= idx / k)
	 { build_zero_cst (type); })
	(if (n == k)
	 { CONSTRUCTOR_ELT (ctor, idx / k)->value; })
	(BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / k)->value; }
		       @1 { bitsize_int ((idx % k) * width); })))))))))